/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

#define POOL_BIG_TABLE_ENTRY_FREE 0x1

typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;
    SIZE_T PoolTrackTableSize;
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
    SIZE_T PoolTrackTableSizeExpansion;
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PKGUARDED_MUTEX ExpPagedPoolMutex;
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
PPOOL_TRACKER_TABLE PoolTrackTable;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
KSPIN_LOCK ExpTaggedPoolLock;
ULONG PoolHitTag;
BOOLEAN ExStopBadTags;
KSPIN_LOCK ExpLargePoolTableLock;
ULONG ExpPoolBigEntriesInUse;
ULONG ExpPoolFlags;
ULONG ExPoolFailures;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))

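//
// Illustrative sketch (editorial note, not part of the original source):
// block sizes are measured in POOL_BLOCK_SIZE units and neighboring blocks
// are chained through the BlockSize/PreviousSize fields. For a header H of a
// 3-unit block that follows a 2-unit block:
//
//     PPOOL_HEADER Next = POOL_NEXT_BLOCK(H);  // H + 3 * POOL_BLOCK_SIZE
//     PPOOL_HEADER Prev = POOL_PREV_BLOCK(H);  // H - 2 * POOL_BLOCK_SIZE
//     ASSERT(POOL_ENTRY(POOL_FREE_BLOCK(H)) == H);
//
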
/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
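
//
// Editorial example (not in the original source): the encode/decode pair
// toggles bit 0 of a link, which is always clear in the real pointer because
// pool headers and list entries are at least 8-byte aligned:
//
//     PLIST_ENTRY Encoded = ExpEncodePoolLink(&ListHead);  // bit 0 set
//     ASSERT(ExpDecodePoolLink(Encoded) == &ListHead);     // bit 0 stripped
//
// The consistency checks below decode both directions of a link and verify
// that Flink/Blink still form a closed loop around the list head.
//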

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}
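
//
// Usage sketch (editorial): free blocks enter and leave the per-size lists
// only through these checked helpers, for example:
//
//     ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
//                           POOL_FREE_BLOCK(Entry));
//     ...
//     Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
//
// so a corrupted Flink/Blink is caught by ExpCheckPoolLinks at the point of
// use, at the cost of an extra decode/encode per list operation.
//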

VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
                    PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
                    Entry->PreviousSize, (char *)&Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        if (Entry->PreviousSize)
        {
            PreviousEntry = POOL_PREV_BLOCK(Entry);
            DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
                    (char *)&PreviousEntry->PoolTag,
                    (char *)&Entry->PoolTag);
        }
        else
        {
            DPRINT1("Entry tag %.4s\n",
                    (char *)&Entry->PoolTag);
        }
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
                    Entry->BlockSize, (char *)&Entry->PoolTag,
                    NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}
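
//
// Editorial note: the invariants verified above can be pictured for three
// consecutive blocks A, B, C on one pool page as
//
//     A.BlockSize == B.PreviousSize
//     B.BlockSize == C.PreviousSize
//     A.PreviousSize == 0  only if  PAGE_ALIGN(A) == A
//
// so a single corrupted size field is detectable from either neighbor.
//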

VOID
NTAPI
ExpCheckPoolAllocation(
    PVOID P,
    POOL_TYPE PoolType,
    ULONG Tag)
{
    PPOOL_HEADER Entry;
    ULONG i;
    KIRQL OldIrql;
    POOL_TYPE RealPoolType;

    /* Get the pool header */
    Entry = ((PPOOL_HEADER)P) - 1;

    /* Check if this is a large allocation */
    if (PAGE_ALIGN(P) == P)
    {
        /* Lock the pool table */
        KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);

        /* Find the pool tag */
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            /* Check if this is our allocation */
            if (PoolBigPageTable[i].Va == P)
            {
                /* Make sure the tag is ok */
                if (PoolBigPageTable[i].Key != Tag)
                {
                    KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
                }

                break;
            }
        }

        /* Release the lock */
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

        if (i == PoolBigPageTableSize)
        {
            /* Did not find the allocation */
            //ASSERT(FALSE);
        }

        /* Get Pool type by address */
        RealPoolType = MmDeterminePoolType(P);
    }
    else
    {
        /* Verify the tag */
        if (Entry->PoolTag != Tag)
        {
            DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
                    &Tag, &Entry->PoolTag, Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }

        /* Check the rest of the header */
        ExpCheckPoolHeader(Entry);

        /* Get Pool type from entry */
        RealPoolType = (Entry->PoolType - 1);
    }

    /* Should we check the pool type? */
    if (PoolType != -1)
    {
        /* Verify the pool type */
        if (RealPoolType != PoolType)
        {
            DPRINT1("Wrong pool type! Expected %s, got %s\n",
                    PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
                    (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
            KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }
    }
}

VOID
NTAPI
ExpCheckPoolBlocks(IN PVOID Block)
{
    BOOLEAN FoundBlock = FALSE;
    SIZE_T Size = 0;
    PPOOL_HEADER Entry;

    /* Get the first entry for this page, make sure it really is the first */
    Entry = PAGE_ALIGN(Block);
    ASSERT(Entry->PreviousSize == 0);

    /* Now scan each entry */
    while (TRUE)
    {
        /* When we actually found our block, remember this */
        if (Entry == Block) FoundBlock = TRUE;

        /* Now validate this block header */
        ExpCheckPoolHeader(Entry);

        /* And go to the next one, keeping track of our size */
        Size += Entry->BlockSize;
        Entry = POOL_NEXT_BLOCK(Entry);

        /* If we hit the last block, stop */
        if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;

        /* If we hit the end of the page, stop */
        if (PAGE_ALIGN(Entry) == Entry) break;
    }

    /* We must've found our block, and we must have hit the end of the page */
    if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
    {
        /* Otherwise, the blocks are messed up */
        KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
    }
}

FORCEINLINE
VOID
ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN PVOID Entry)
{
    //
    // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
    // be DISPATCH_LEVEL or lower for Non Paged Pool
    //
    if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
        (KeGetCurrentIrql() > APC_LEVEL) :
        (KeGetCurrentIrql() > DISPATCH_LEVEL))
    {
        //
        // Take the system down
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
                     KeGetCurrentIrql(),
                     PoolType,
                     !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
    }
}
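
//
// Editorial example: a paged pool request issued at DISPATCH_LEVEL fails this
// check and takes the machine down immediately, e.g. (hypothetical tag):
//
//     KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
//     ExAllocatePoolWithTag(PagedPool, 16, 'xTag');
//     // -> BAD_POOL_CALLER, code POOL_ALLOC_IRQL_INVALID
//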

FORCEINLINE
ULONG
ExpComputeHashForTag(IN ULONG Tag,
                     IN SIZE_T BucketMask)
{
    //
    // Compute the hash by multiplying with a large prime number and then XORing
    // with the HIDWORD of the result.
    //
    // Finally, AND with the bucket mask to generate a valid index/bucket into
    // the table
    //
    ULONGLONG Result = (ULONGLONG)40543 * Tag;
    return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
}
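
//
// Worked sketch (editorial): with the default table set up in InitializePool
// below, BucketMask is 2047 (a 2048-bucket power-of-two region plus one spare
// entry), so for any tag:
//
//     ULONGLONG Product = 40543ULL * Tag;
//     ULONG Bucket = ((ULONG)Product ^ (ULONG)(Product >> 32)) & 2047;
//
// Folding in the high dword spreads tags that differ only in a few bytes.
//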

FORCEINLINE
ULONG
ExpComputePartialHashForAddress(IN PVOID BaseAddress)
{
    ULONG Result;
    //
    // Compute the hash by converting the address into a page number, and then
    // folding its bytes together by XORing the value with itself shifted right
    // by 8, 16 and 24 bits.
    //
    // We do *NOT* AND with the bucket mask at this point because big table expansion
    // might happen. Therefore, the final step of the hash must be performed
    // while holding the expansion lock, and this is why we call this a
    // "partial" hash only.
    //
    Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
    return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
}
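
//
// Worked sketch (editorial): for Va = 0x8013A000 with 4KB pages, the page
// number is 0x8013A and the partial hash is
//
//     0x8013A ^ (0x8013A >> 8) ^ (0x8013A >> 16) ^ (0x8013A >> 24)
//
// which the callers then reduce with PoolBigPageTableHash under the lock.
//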

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
INIT_FUNCTION
ExpSeedHotTags(VOID)
{
    ULONG i, Key, Hash, Index;
    PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
    ULONG TagList[] =
    {
        '  oI',
        ' laH',
        'PldM',
        'LooP',
        'tSbO',
        ' prI',
        'bdDN',
        'LprI',
        'pOoI',
        ' ldM',
        'eliF',
        'aVMC',
        'dSeS',
        'CFtN',
        'looP',
        'rPCT',
        'bNMC',
        'dTeS',
        'sFtN',
        'TPCT',
        'CPCT',
        ' yeK',
        'qSbO',
        'mNoI',
        'aEoI',
        'cPCT',
        'aFtN',
        '0ftN',
        'tceS',
        'SprI',
        'ekoT',
        '  eS',
        'lCbO',
        'cScC',
        'lFtN',
        'cAeS',
        'mfSF',
        'kWcC',
        'miSF',
        'CdfA',
        'EdfA',
        'orSF',
        'nftN',
        'PRIU',
        'rFpN',
        'RFpN',
        'aPeS',
        'sUeS',
        'FpcA',
        'MpcA',
        'cSeS',
        'mNbO',
        'sFpN',
        'uLeS',
        'DPcS',
        'nevE',
        'vrqR',
        'ldaV',
        '  pP',
        'SdaV',
        ' daV',
        'LdaV',
        'FdaV',
        ' GIB',
    };

    //
    // Loop all 64 hot tags
    //
    ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
    for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
    {
        //
        // Get the current tag, and compute its hash in the tracker table
        //
        Key = TagList[i];
        Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);

        //
        // Loop all the hashes in this index/bucket
        //
        Index = Hash;
        while (TRUE)
        {
            //
            // Find an empty entry, and make sure this isn't the last hash that
            // can fit.
            //
            // On checked builds, also make sure this is the first time we are
            // seeding this tag.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
            {
                //
                // It has been seeded, move on to the next tag
                //
                TrackTable[Hash].Key = Key;
                break;
            }

            //
            // This entry was already taken, compute the next possible hash while
            // making sure we're not back at our initial index.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            Hash = (Hash + 1) & PoolTrackTableMask;
            if (Hash == Index) break;
        }
    }
}
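
//
// Editorial note: the tags above read reversed in source because the four
// characters are packed into a little-endian ULONG; e.g. 'eliF' is the
// familiar "File" tag and 'looP' is "Pool" when viewed in memory order.
//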

VOID
NTAPI
ExpRemovePoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Have we found the entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Decrement the counters depending on if this was paged or nonpaged
            // pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedFrees);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
                                        -(SSIZE_T)NumberOfBytes);
            return;
        }

        //
        // We should have only ended up with an empty entry if we've reached
        // the last bucket
        //
        if (!TableEntry->Key)
        {
            DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
                    Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
            ASSERT(Hash == TableMask);
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}

VOID
NTAPI
ExpInsertPoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    KIRQL OldIrql;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // There is also an internal flag you can set to break on malformed tags
    //
    if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);

    //
    // ASSERT on ReactOS features not yet supported
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));
    ASSERT(KeGetCurrentProcessorNumber() == 0);

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Do we already have an entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Increment the counters depending on if this was paged or nonpaged
            // pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedAllocs);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
            return;
        }

        //
        // We don't have an entry yet, but we've found a free bucket for it
        //
        if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
        {
            //
            // We need to hold the lock while creating a new entry, since other
            // processors might be in this code path as well
            //
            ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
            if (!PoolTrackTable[Hash].Key)
            {
                //
                // We've won the race, so now create this entry in the bucket
                //
                ASSERT(Table[Hash].Key == 0);
                PoolTrackTable[Hash].Key = Key;
                TableEntry->Key = Key;
            }
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            //
            // Now we force the loop to run again, and we should now end up in
            // the code path above which does the interlocked increments...
            //
            continue;
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}

VOID
NTAPI
INIT_FUNCTION
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }

    //
    // Note that ReactOS does not support session pool yet
    //
    ASSERT(PoolType != PagedPoolSession);
}

VOID
NTAPI
INIT_FUNCTION
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;
    SIZE_T TableSize;
    ULONG i;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Compute the track table size and convert it from a power of two to an
        // actual byte size
        //
        // NOTE: On checked builds, we'll assert if the registry table size was
        // invalid, while on retail builds we'll just break out of the loop at
        // that point.
        //
        TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // If we hit bit 32, then no size was defined in the registry, so
        // we'll use the default size of 2048 entries.
        //
        // Otherwise, use the size from the registry, as long as it's not
        // smaller than 64 entries.
        //
        if (i == 32)
        {
            PoolTrackTableSize = 2048;
        }
        else
        {
            PoolTrackTableSize = max(1 << i, 64);
        }

        //
        // Loop trying with the biggest specified size first, and cut it down
        // by a power of two each iteration in case not enough memory exists
        //
        while (TRUE)
        {
            //
            // Do not allow overflow
            //
            if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
            {
                PoolTrackTableSize >>= 1;
                continue;
            }

            //
            // Allocate the tracker table and exit the loop if this worked
            //
            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 (PoolTrackTableSize + 1) *
                                                 sizeof(POOL_TRACKER_TABLE));
            if (PoolTrackTable) break;

            //
            // Otherwise, as long as we're not down to the last bit, keep
            // iterating
            //
            if (PoolTrackTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }
            PoolTrackTableSize >>= 1;
        }

        //
        // Add one entry, compute the hash, and zero the table
        //
        PoolTrackTableSize++;
        PoolTrackTableMask = PoolTrackTableSize - 2;
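        //
        // (Editorial note: the table was allocated with a power-of-two number
        // of buckets plus one spare entry at the end, so after the increment
        // above, Size - 2 equals the power-of-two minus one, i.e. a valid
        // wrap-around mask for the probing loops.)
        //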

        RtlZeroMemory(PoolTrackTable,
                      PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // Finally, add the most used tags to speed up those allocations
        //
        ExpSeedHotTags();

        //
        // We now do the exact same thing with the tracker table for big pages
        //
        TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // For big pages, the default tracker table is 4096 entries, while the
        // minimum is still 64
        //
        if (i == 32)
        {
            PoolBigPageTableSize = 4096;
        }
        else
        {
            PoolBigPageTableSize = max(1 << i, 64);
        }

        //
        // Again, run the exact same loop we ran earlier, but this time for the
        // big pool tracker instead
        //
        while (TRUE)
        {
            if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
            {
                PoolBigPageTableSize >>= 1;
                continue;
            }

            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                   sizeof(POOL_TRACKER_BIG_PAGES));
            if (PoolBigPageTable) break;

            if (PoolBigPageTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }

            PoolBigPageTableSize >>= 1;
        }

        //
        // An extra entry is not needed for the big pool tracker, so just
        // compute the hash and zero it, then mark every entry free by setting
        // bit 0 of its Va (POOL_BIG_TABLE_ENTRY_FREE)
        //
        PoolBigPageTableHash = PoolBigPageTableSize - 1;
        RtlZeroMemory(PoolBigPageTable,
                      PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;

        //
        // During development, print this out so we can see what's happening
        //
        DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
        DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));

        //
        // Insert the generic tracker for all of big pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolBigPageTableSize *
                                            sizeof(POOL_TRACKER_BIG_PAGES)),
                             NonPagedPool);

        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Initialize the tag spinlock
        //
        KeInitializeSpinLock(&ExpTaggedPoolLock);

        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        ExpPagedPoolDescriptor[0] = Descriptor;
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);

        //
        // Insert the generic tracker for all of nonpaged pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                             NonPagedPool);
    }
}

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
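
//
// Usage sketch (editorial): callers always pair the two, e.g.
//
//     OldIrql = ExLockPool(PoolDesc);
//     ... manipulate PoolDesc->ListHeads ...
//     ExUnlockPool(PoolDesc, OldIrql);
//
// so nonpaged descriptors are protected by the queued spinlock at
// DISPATCH_LEVEL and paged descriptors by the guarded mutex at APC_LEVEL.
//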

VOID
NTAPI
ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
                        IN PVOID DeferredContext,
                        IN PVOID SystemArgument1,
                        IN PVOID SystemArgument2)
{
    PPOOL_DPC_CONTEXT Context = DeferredContext;
    UNREFERENCED_PARAMETER(Dpc);
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    //
    // Make sure we win the race, and if we did, copy the data atomically
    //
    if (KeSignalCallDpcSynchronize(SystemArgument2))
    {
        RtlCopyMemory(Context->PoolTrackTable,
                      PoolTrackTable,
                      Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // This is here because ReactOS does not yet support expansion
        //
        ASSERT(Context->PoolTrackTableSizeExpansion == 0);
    }

    //
    // Regardless of whether we won or not, we must now synchronize and then
    // decrement the barrier since this is one more processor that has completed
    // the callback.
    //
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}

NTSTATUS
NTAPI
ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
                 IN ULONG SystemInformationLength,
                 IN OUT PULONG ReturnLength OPTIONAL)
{
    ULONG TableSize, CurrentLength;
    ULONG EntryCount;
    NTSTATUS Status = STATUS_SUCCESS;
    PSYSTEM_POOLTAG TagEntry;
    PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
    POOL_DPC_CONTEXT Context;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Keep track of how much data the caller's buffer must hold
    //
    CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);

    //
    // Initialize the caller's buffer
    //
    TagEntry = &SystemInformation->TagInfo[0];
    SystemInformation->Count = 0;

    //
    // Capture the number of entries, and the total size needed to make a copy
    // of the table
    //
    EntryCount = (ULONG)PoolTrackTableSize;
    TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);

    //
    // Allocate the "Generic DPC" temporary buffer
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    //
    // Do a "Generic DPC" to atomically retrieve the tag and allocation data
    //
    Context.PoolTrackTable = Buffer;
    Context.PoolTrackTableSize = PoolTrackTableSize;
    Context.PoolTrackTableExpansion = NULL;
    Context.PoolTrackTableSizeExpansion = 0;
    KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);

    //
    // Now parse the results
    //
    for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
    {
        //
        // If the entry is empty, skip it
        //
        if (!TrackerEntry->Key) continue;

        //
        // Otherwise, add one more entry to the caller's buffer, and ensure that
        // enough space has been allocated in it
        //
        SystemInformation->Count++;
        CurrentLength += sizeof(*TagEntry);
        if (SystemInformationLength < CurrentLength)
        {
            //
            // The caller's buffer is too small, so set a failure code. The
            // caller will know the count, as well as how much space is needed.
            //
            // We do NOT break out of the loop, because we want to keep incrementing
            // the Count as well as CurrentLength so that the caller can know the
            // final numbers
            //
            Status = STATUS_INFO_LENGTH_MISMATCH;
        }
        else
        {
            //
            // Small sanity check that our accounting is working correctly
            //
            ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
            ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);

            //
            // Return the data into the caller's buffer
            //
            TagEntry->TagUlong = TrackerEntry->Key;
            TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
            TagEntry->PagedFrees = TrackerEntry->PagedFrees;
            TagEntry->PagedUsed = TrackerEntry->PagedBytes;
            TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
            TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
            TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
            TagEntry++;
        }
    }

    //
    // Free the "Generic DPC" temporary buffer, return the buffer length and status
    //
    ExFreePoolWithTag(Buffer, 'ofnI');
    if (ReturnLength) *ReturnLength = CurrentLength;
    return Status;
}
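
//
// Caller sketch (editorial): this routine backs the SystemPoolTagInformation
// class of NtQuerySystemInformation. On STATUS_INFO_LENGTH_MISMATCH a caller
// simply retries with the size reported through ReturnLength, which keeps
// accumulating even after the buffer runs out, as noted above.
//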

BOOLEAN
NTAPI
ExpAddTagForBigPages(IN PVOID Va,
                     IN ULONG Key,
                     IN ULONG NumberOfPages,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, i = 0;
    PVOID OldVa;
    KIRQL OldIrql;
    SIZE_T TableSize;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // rollover to hash bucket 0 and keep going from there. If we return back
    // to the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry and attempt to atomically make the
        // entry busy now
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 25% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
            // Note that we only do this if this is also at least the 16th time
            // we have lost the race or failed to find a free entry, which
            // implies a massive number of concurrent big pool allocations.
            //
            InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
            if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
            {
                DPRINT("Should attempt expansion since we now have %lu entries\n",
                       ExpPoolBigEntriesInUse);
            }

            //
            // We have our entry, return
            //
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }

        //
        // We don't have our entry yet, so keep trying, making the entry list
        // circular if we reach the last entry. We'll eventually break out of
        // the loop once we've rolled over and returned back to our original
        // hash bucket
        //
        i++;
        if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
    } while (Entry != EntryStart);

    //
    // This means there are no free hash buckets whatsoever, so we would now
    // have to attempt expanding the table
    //
    DPRINT1("Big pool expansion needed, not implemented!\n");
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return FALSE;
}

ULONG
NTAPI
ExpFindAndRemoveTagBigPages(IN PVOID Va,
                            OUT PULONG_PTR BigPages,
                            IN POOL_TYPE PoolType)
{
    BOOLEAN FirstTry = TRUE;
    SIZE_T TableSize;
    KIRQL OldIrql;
    ULONG PoolTag, Hash;
    PPOOL_TRACKER_BIG_PAGES Entry;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // Loop while trying to find this big page allocation
    //
    while (PoolBigPageTable[Hash].Va != Va)
    {
        //
        // Increment the hash until we go past the end of the table
        //
        if (++Hash >= TableSize)
        {
            //
            // Is this the second time we've tried?
            //
            if (!FirstTry)
            {
                //
                // This means it was never inserted into the pool table and it
                // received the special "BIG" tag -- return that and return 0
                // so that the code can ask Mm for the page count instead
                //
                KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
                *BigPages = 0;
                return ' GIB';
            }

            //
            // The first time this happens, reset the hash index and try again
            //
            Hash = 0;
            FirstTry = FALSE;
        }
    }

    //
    // Now capture all the information we need from the entry, since after we
    // release the lock, the data can change
    //
    Entry = &PoolBigPageTable[Hash];
    *BigPages = Entry->NumberOfPages;
    PoolTag = Entry->Key;

    //
    // Set the free bit (Va is page-aligned, so incrementing it sets
    // POOL_BIG_TABLE_ENTRY_FREE), and decrement the number of allocations.
    // Finally, release the lock and return the tag that was located
    //
    InterlockedIncrement((PLONG)&Entry->Va);
    InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return PoolTag;
}

VOID
NTAPI
ExQueryPoolUsage(OUT PULONG PagedPoolPages,
                 OUT PULONG NonPagedPoolPages,
                 OUT PULONG PagedPoolAllocs,
                 OUT PULONG PagedPoolFrees,
                 OUT PULONG PagedPoolLookasideHits,
                 OUT PULONG NonPagedPoolAllocs,
                 OUT PULONG NonPagedPoolFrees,
                 OUT PULONG NonPagedPoolLookasideHits)
{
    ULONG i;
    PPOOL_DESCRIPTOR PoolDesc;

    //
    // Assume all failures
    //
    *PagedPoolPages = 0;
    *PagedPoolAllocs = 0;
    *PagedPoolFrees = 0;

    //
    // Tally up the totals for all the paged pools
    //
    for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
    {
        PoolDesc = ExpPagedPoolDescriptor[i];
        *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
        *PagedPoolAllocs += PoolDesc->RunningAllocs;
        *PagedPoolFrees += PoolDesc->RunningDeAllocs;
    }

    //
    // The first non-paged pool has a hardcoded well-known descriptor name
    //
    PoolDesc = &NonPagedPoolDescriptor;
    *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
    *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
    *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;

    //
    // If the system has more than one non-paged pool, copy the other descriptor
    // totals as well
    //
#if 0
    if (ExpNumberOfNonPagedPools > 1)
    {
        for (i = 0; i < ExpNumberOfNonPagedPools; i++)
        {
            PoolDesc = ExpNonPagedPoolDescriptor[i];
            *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
            *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
            *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
        }
    }
#endif

    //
    // FIXME: Not yet supported
    //
    *NonPagedPoolLookasideHits += 0;
    *PagedPoolLookasideHits += 0;
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;
    ULONG OriginalType;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);
    ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);

    //
    // Not supported in ReactOS
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Check if verifier or special pool is enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
    {
        //
        // For verifier, we should call the verification routine
        //
        if (ExpPoolFlags & POOL_FLAG_VERIFIER)
        {
            DPRINT1("Driver Verifier is not yet supported\n");
        }

        //
        // For special pool, we check if this is a suitable allocation and do
        // the special allocation if needed
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if this is a special pool allocation
            //
            if (MmUseSpecialPool(NumberOfBytes, Tag))
            {
                //
                // Try to allocate using special pool
                //
                Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
                if (Entry) return Entry;
            }
        }
    }

    //
    // Get the pool type and its corresponding vector for this request
    //
    OriginalType = PoolType;
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Allocate pages for it
        //
        Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
        if (!Entry)
        {
            //
            // Must succeed pool is deprecated, but still supported. These allocation
            // failures must cause an immediate bugcheck
            //
            if (OriginalType & MUST_SUCCEED_POOL_MASK)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             NumberOfBytes,
                             NonPagedPoolDescriptor.TotalPages,
                             NonPagedPoolDescriptor.TotalBigPages,
                             0);
            }

            //
            // Internal debugging
            //
            ExPoolFailures++;

            //
            // This flag requests printing failures, and can also further specify
            // breaking on failures
            //
            if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
            {
                DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                        NumberOfBytes,
                        OriginalType);
                if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
            }

            //
            // Finally, this flag requests an exception, which we are more than
            // happy to raise!
            //
            if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
            {
                ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
            }

            //
            // Otherwise, return NULL like the fresh-page path below does;
            // falling through here would update the counters and insert a
            // tracker for a failed allocation
            //
            return NULL;
        }

        //
        // Increment required counters
        //
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               (LONG)BYTES_TO_PAGES(NumberOfBytes));
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
        InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

        //
        // Add a tag for the big page allocation and switch to the generic "BIG"
        // tag if we failed to do so, then insert a tracker for this allocation.
        //
        if (!ExpAddTagForBigPages(Entry,
                                  Tag,
                                  (ULONG)BYTES_TO_PAGES(NumberOfBytes),
                                  OriginalType))
        {
            Tag = ' GIB';
        }
        ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
        return Entry;
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);
    ASSERT(i < POOL_LISTS_PER_PAGE);
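
    //
    // Worked example (editorial): on x86, sizeof(POOL_HEADER) and
    // POOL_BLOCK_SIZE are both 8, so a 100-byte request computes
    //
    //     i = (100 + 8 + 7) / 8 = 14 blocks = 112 bytes,
    //
    // i.e. the 8-byte header plus 104 usable bytes, rounded up to the next
    // block boundary.
    //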

    //
    // Handle lookaside list optimization for both paged and nonpaged pool
    //
    if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try popping it from the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[i - 1].P :
                        Prcb->PPNPagedLookasideList[i - 1].P;
        LookasideList->TotalAllocates++;
        Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        if (!Entry)
        {
            //
            // We failed, try popping it from the global list
            //
            LookasideList = (PoolType == PagedPool) ?
                            Prcb->PPPagedLookasideList[i - 1].L :
                            Prcb->PPNPagedLookasideList[i - 1].L;
            LookasideList->TotalAllocates++;
            Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        }

        //
        // If we were able to pop it, update the accounting and return the block
        //
        if (Entry)
        {
            LookasideList->AllocateHits++;

            //
            // Get the real entry, write down its pool type, and track it
            //
            Entry--;
            Entry->PoolType = OriginalType + 1;
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    }

    //
    // Loop in the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                continue;
            }

            //
            // Remove a free entry from the list.
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will be of the
            // correct size, or larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = OriginalType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Increment required counters
            //
            InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
            InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

            //
            // Track this allocation
            //
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
    if (!Entry)
    {
        //
        // Must succeed pool is deprecated, but still supported. These allocation
        // failures must cause an immediate bugcheck
        //
        if (OriginalType & MUST_SUCCEED_POOL_MASK)
        {
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         PAGE_SIZE,
                         NonPagedPoolDescriptor.TotalPages,
                         NonPagedPoolDescriptor.TotalBigPages,
                         0);
        }

        //
        // Internal debugging
        //
        ExPoolFailures++;

        //
        // This flag requests printing failures, and can also further specify
        // breaking on failures
        //
        if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
        {
            DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                    NumberOfBytes,
                    OriginalType);
            if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
        }

        //
        // Finally, this flag requests an exception, which we are more than
        // happy to raise!
        //
        if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
        {
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
        }

        //
        // Return NULL to the caller in all other cases
        //
        return NULL;
    }

    //
    // Setup the entry data
    //
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = OriginalType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Increment required counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }
    else
    {
        //
        // Simply do a sanity check
        //
        ExpCheckPoolBlocks(Entry);
    }

    //
    // Increment performance counters and track this allocation
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
    ExpInsertPoolTracker(Tag,
                         Entry->BlockSize * POOL_BLOCK_SIZE,
                         OriginalType);

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}
2040
2041 /*
2042 * @implemented
2043 */
2044 PVOID
2045 NTAPI
2046 ExAllocatePool(POOL_TYPE PoolType,
2047 SIZE_T NumberOfBytes)
2048 {
2049 ULONG Tag = TAG_NONE;
#if 0 && DBG
    PLDR_DATA_TABLE_ENTRY LdrEntry;

    /* Use the first four letters of the driver name, or "None" if unavailable */
    LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
               ? MiLookupDataTableEntry(_ReturnAddress())
               : NULL;
    if (LdrEntry)
    {
        ULONG i;
        Tag = 0;
        for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
            Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
        for (; i < 4; i++)
            Tag = Tag >> 8 | ' ' << 24;
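
        /* The shifts build the tag little-endian: the first character of the
           driver name ends up in the low byte, which matches how 4-character
           pool tags are laid out in memory */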
    }
#endif
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    USHORT BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    ULONG Tag;
    BOOLEAN Combined = FALSE;
    PFN_NUMBER PageCount, RealPageCount;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;
    PEPROCESS Process;

    //
    // Check if any of the debug flags are enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                        POOL_FLAG_CHECK_WORKERS |
                        POOL_FLAG_CHECK_RESOURCES |
                        POOL_FLAG_VERIFIER |
                        POOL_FLAG_CHECK_DEADLOCK |
                        POOL_FLAG_SPECIAL_POOL))
    {
        //
        // Check if special pool is enabled
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if it was allocated from a special pool
            //
            if (MmIsSpecialPoolAddress(P))
            {
                //
                // Was deadlock verification also enabled? We can do some extra
                // checks at this point
                //
                if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
                {
                    DPRINT1("Verifier not yet supported\n");
                }

                //
                // It is, so handle it via special pool free routine
                //
                MmFreeSpecialPool(P);
                return;
            }
        }

        //
        // For non-big page allocations, we'll do a bunch of checks in here
        //
        if (PAGE_ALIGN(P) != P)
        {
            //
            // Get the entry for this pool allocation
            // The pointer math here may look wrong or confusing, but it is quite right
            //
            Entry = P;
            Entry--;

            //
            // Get the pool type
            //
            PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;

            //
            // FIXME: Many other debugging checks go here
            //
            ExpCheckPoolIrqlLevel(PoolType, 0, P);
        }
    }

    //
    // Check if this is a big page allocation
    //
    if (PAGE_ALIGN(P) == P)
    {
        //
        // We need to find the tag for it, so first we need to find out what
        // kind of allocation this was (paged or nonpaged), then we can go
        // ahead and try finding the tag for it. Remember to get rid of the
        // PROTECTED_POOL tag if it's found.
        //
        // Note that if at insertion time, we failed to add the tag for a big
        // pool allocation, we used a special tag called 'BIG' to identify the
        // allocation, and we may get this tag back. In this scenario, we must
        // manually get the size of the allocation by actually counting through
        // the PFN database.
        //
        PoolType = MmDeterminePoolType(P);
        ExpCheckPoolIrqlLevel(PoolType, 0, P);
        Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
        if (!Tag)
        {
            DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
            ASSERT(Tag == ' GIB');
            PageCount = 1; // We are going to lie! This might screw up accounting?
        }
        else if (Tag & PROTECTED_POOL)
        {
            Tag &= ~PROTECTED_POOL;
        }

        //
        // Check block tag
        //
        if (TagToFree && TagToFree != Tag)
        {
            DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
        }

        //
        // We have our tag and our page count, so we can go ahead and remove this
        // tracker now
        //
        ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);

        //
        // Check if any of the debug flags are enabled
        //
        if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                            POOL_FLAG_CHECK_WORKERS |
                            POOL_FLAG_CHECK_RESOURCES |
                            POOL_FLAG_CHECK_DEADLOCK))
        {
            //
            // Was deadlock verification also enabled? We can do some extra
            // checks at this point
            //
            if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
            {
                DPRINT1("Verifier not yet supported\n");
            }

            //
            // FIXME: Many debugging checks go here
            //
        }

        //
        // Update counters
        //
        PoolDesc = PoolVector[PoolType];
        InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
                                    -(LONG_PTR)(PageCount << PAGE_SHIFT));

        //
        // Do the real free now and update the last counter with the big page count
        //
        RealPageCount = MiFreePoolPages(P);
        ASSERT(RealPageCount == PageCount);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               -(LONG)RealPageCount);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;
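    // (P points at the caller-visible body; backing up by one POOL_HEADER
    // lands on the header that was written immediately in front of it at
    // allocation time)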
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    //
    // Get the size of the entry, and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Make sure that the IRQL makes sense
    //
    ExpCheckPoolIrqlLevel(PoolType, 0, P);

    //
    // Get the pool tag and get rid of the PROTECTED_POOL flag
    //
    Tag = Entry->PoolTag;
    if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;

    //
    // Check block tag
    //
    if (TagToFree && TagToFree != Tag)
    {
        DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
        KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
    }

    //
    // Track the removal of this allocation
    //
    ExpRemovePoolTracker(Tag,
                         BlockSize * POOL_BLOCK_SIZE,
                         Entry->PoolType - 1);

    //
    // Release pool quota, if any
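    // (the owning process pointer was stashed in the last PVOID-sized slot of
    // the block by ExAllocatePoolWithQuotaTag; it is read back and
    // dereferenced below)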
    //
    if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        ASSERT(Process != NULL);
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             0x0D,
                             (ULONG_PTR)P,
                             Tag,
                             (ULONG_PTR)Process);
            }
            PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }

    //
    // Is this allocation small enough to have come from a lookaside list?
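    // Each size up to NUMBER_POOL_LOOKASIDE_LISTS blocks has two caches: a
    // per-processor list (the .P member below) and a systemwide one (.L);
    // frees try the per-CPU list first and fall back to the global list when
    // the per-CPU one is already at its depth limit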
    //
    if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try pushing it into the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].P :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].P;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }

        //
        // We failed, try to push it into the global lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].L :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].L;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }
    }

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Update performance counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
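            // (the PreviousSize of whatever block follows the merged entry is
            // fixed up further below, once all possible combining is done)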
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, update the performance counter,
        // and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
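    // (an allocated block always spans at least two units -- header plus
    // data -- and combining can only grow a block, so a size of 1 here
    // is impossible)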
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}

/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return FALSE;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
}

/*
 * @unimplemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // Priority handling is not implemented yet -- fall back to a plain
    // tagged allocation
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    BOOLEAN Raise = TRUE;
    PVOID Buffer;
    PPOOL_HEADER Entry;
    NTSTATUS Status;
    PEPROCESS Process = PsGetCurrentProcess();

    //
    // Check if we should fail instead of raising an exception
    //
    if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
    {
        Raise = FALSE;
        PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
    }

    //
    // Inject the pool quota mask
    //
    PoolType += QUOTA_POOL_MASK;

    //
    // Check if we have enough space to add the quota owner process, as long as
    // this isn't the system process, which never gets charged quota
    //
    ASSERT(NumberOfBytes != 0);
    if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
        (Process != PsInitialSystemProcess))
    {
        //
        // Add space for our EPROCESS pointer
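        // (it will live in the last PVOID-sized slot of the block -- see the
        // ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] writes further down)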
        //
        NumberOfBytes += sizeof(PEPROCESS);
    }
    else
    {
        //
        // We won't be able to store the pointer, so don't use quota for this
        //
        PoolType -= QUOTA_POOL_MASK;
    }

    //
    // Allocate the pool buffer now
    //
    Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);

    //
    // If the buffer is page-aligned, this is a large page allocation and we
    // won't touch it
    //
    if (PAGE_ALIGN(Buffer) != Buffer)
    {
        //
        // Also if special pool is enabled, and this was allocated from there,
        // we won't touch it either
        //
        if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
            (MmIsSpecialPoolAddress(Buffer)))
        {
            return Buffer;
        }

        //
        // If it wasn't actually allocated with quota charges, ignore it too
        //
        if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;

        //
        // If this is the system process, we don't charge quota, so ignore
        //
        if (Process == PsInitialSystemProcess) return Buffer;

        //
        // Actually go and charge quota for the process now
        //
        Entry = POOL_ENTRY(Buffer);
        Status = PsChargeProcessPoolQuota(Process,
                                          PoolType & BASE_POOL_TYPE_MASK,
                                          Entry->BlockSize * POOL_BLOCK_SIZE);
        if (!NT_SUCCESS(Status))
        {
            //
            // Quota failed, back out the allocation, clear the owner, and fail
            //
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            ExFreePoolWithTag(Buffer, Tag);
            if (Raise) RtlRaiseStatus(Status);
            return NULL;
        }

        //
        // Quota worked, write the owner and then reference it before returning
        //
        ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
        ObReferenceObject(Process);
    }
    else if (!(Buffer) && (Raise))
    {
        //
        // The allocation failed, raise an error if we are in raise mode
        //
        RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
    }

    //
    // Return the allocated buffer
    //
    return Buffer;
}

#if DBG && KDBG

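/*
 * kdbg helper for dumping pool pages: Argv[1] is the address to inspect and
 * Argv[2] an optional flags value. Bit 0 limits the per-entry listing to the
 * entry containing the address and additionally dumps the first eight ULONGs
 * of every entry; bit 31 suppresses the allocated/free state and tag columns.
 */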
BOOLEAN
ExpKdbgExtPool(
    ULONG Argc,
    PCHAR Argv[])
{
    ULONG_PTR Address = 0, Flags = 0;
    PVOID PoolPage;
    PPOOL_HEADER Entry;
    BOOLEAN ThisOne;
    PULONG Data;

    if (Argc > 1)
    {
        /* Get address */
        if (!KdbpGetHexNumber(Argv[1], &Address))
        {
            KdbpPrint("Invalid parameter: %s\n", Argv[1]);
            return TRUE;
        }
    }

    if (Argc > 2)
    {
        /* Get flags */
        if (!KdbpGetHexNumber(Argv[2], &Flags))
        {
            KdbpPrint("Invalid parameter: %s\n", Argv[2]);
            return TRUE;
        }
    }

    /* Check if we got an address */
    if (Address != 0)
    {
        /* Get the base page */
        PoolPage = PAGE_ALIGN(Address);
    }
    else
    {
        KdbpPrint("Heap is unimplemented\n");
        return TRUE;
    }

    /* No paging support! */
    if (!MmIsAddressValid(PoolPage))
    {
        KdbpPrint("Address not accessible!\n");
        return TRUE;
    }

    /* Get pool type */
    if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
        KdbpPrint("Allocation is from PagedPool region\n");
    else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
        KdbpPrint("Allocation is from NonPagedPool region\n");
    else
    {
        KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
        return TRUE;
    }

    /* Loop all entries of that page */
    Entry = PoolPage;
    do
    {
        /* Check if the address is within that entry */
        ThisOne = ((Address >= (ULONG_PTR)Entry) &&
                   (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));

        if (!(Flags & 1) || ThisOne)
        {
            /* Print the line */
            KdbpPrint("%c%p size: %4d previous size: %4d  %s %.4s\n",
                      ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
                      (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free)     "),
                      (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
        }

        if (Flags & 1)
        {
            Data = (PULONG)(Entry + 1);
            KdbpPrint("  %p  %08lx %08lx %08lx %08lx\n"
                      "  %p  %08lx %08lx %08lx %08lx\n",
                      &Data[0], Data[0], Data[1], Data[2], Data[3],
                      &Data[4], Data[4], Data[5], Data[6], Data[7]);
        }

        /* Go to next entry */
        Entry = POOL_BLOCK(Entry, Entry->BlockSize);
    }
    while (((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE) && (Entry->BlockSize != 0));

    return TRUE;
}

#endif // DBG && KDBG

/* EOF */