- Add missing newline to debug print.
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / expool.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "../ARM3/miarm.h"
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 LONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
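//
// Worked example (illustrative only): with the x86 values of an 8-byte
// POOL_HEADER and an 8-byte POOL_BLOCK_SIZE, a 4096-byte page holds 512
// block units, and a header with BlockSize == 14 describes a 112-byte
// chunk (header included). POOL_NEXT_BLOCK/POOL_PREV_BLOCK simply walk
// the page in these units.
//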
56
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
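//
// Example: since pool headers are at least 8-byte aligned, the bottom bit
// of a valid PLIST_ENTRY pointer is always 0. Encoding sets that bit and
// decoding masks it off, so a link that was corrupted (or dereferenced
// without decoding) immediately stands out as a misaligned pointer.
//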
86
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 KeBugCheckEx(BAD_POOL_HEADER,
209 5,
210 (ULONG_PTR)PreviousEntry,
211 __LINE__,
212 (ULONG_PTR)Entry);
213 }
214 }
215 else if (PAGE_ALIGN(Entry) != Entry)
216 {
217 /* If there's no block before us, we are the first block, so we should be on a page boundary */
218 KeBugCheckEx(BAD_POOL_HEADER,
219 7,
220 0,
221 __LINE__,
222 (ULONG_PTR)Entry);
223 }
224
225 /* This block must have a size */
226 if (!Entry->BlockSize)
227 {
228 /* Someone must've corrupted this field */
229 KeBugCheckEx(BAD_POOL_HEADER,
230 8,
231 0,
232 __LINE__,
233 (ULONG_PTR)Entry);
234 }
235
236 /* Okay, now get the next block */
237 NextEntry = POOL_NEXT_BLOCK(Entry);
238
239 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
240 if (PAGE_ALIGN(NextEntry) != NextEntry)
241 {
242 /* The two blocks must be on the same page! */
243 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
244 {
245 /* Something is messed up */
246 KeBugCheckEx(BAD_POOL_HEADER,
247 9,
248 (ULONG_PTR)NextEntry,
249 __LINE__,
250 (ULONG_PTR)Entry);
251 }
252
253 /* And this block should think we are as large as we truly are */
254 if (NextEntry->PreviousSize != Entry->BlockSize)
255 {
256 /* Otherwise, someone corrupted the field */
257 KeBugCheckEx(BAD_POOL_HEADER,
258 5,
259 (ULONG_PTR)NextEntry,
260 __LINE__,
261 (ULONG_PTR)Entry);
262 }
263 }
264 }
265
266 VOID
267 NTAPI
268 ExpCheckPoolBlocks(IN PVOID Block)
269 {
270 BOOLEAN FoundBlock = FALSE;
271 SIZE_T Size = 0;
272 PPOOL_HEADER Entry;
273
274 /* Get the first entry for this page, make sure it really is the first */
275 Entry = PAGE_ALIGN(Block);
276 ASSERT(Entry->PreviousSize == 0);
277
278 /* Now scan each entry */
279 while (TRUE)
280 {
281 /* When we actually found our block, remember this */
282 if (Entry == Block) FoundBlock = TRUE;
283
284 /* Now validate this block header */
285 ExpCheckPoolHeader(Entry);
286
287 /* And go to the next one, keeping track of our size */
288 Size += Entry->BlockSize;
289 Entry = POOL_NEXT_BLOCK(Entry);
290
291 /* If we hit the last block, stop */
292 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
293
294 /* If we hit the end of the page, stop */
295 if (PAGE_ALIGN(Entry) == Entry) break;
296 }
297
298 /* We must've found our block, and we must have hit the end of the page */
299 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
300 {
301 /* Otherwise, the blocks are messed up */
302 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
303 }
304 }
305
306 FORCEINLINE
307 VOID
308 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
309 IN SIZE_T NumberOfBytes,
310 IN PVOID Entry)
311 {
312 //
313 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
314 // be DISPATCH_LEVEL or lower for Non Paged Pool
315 //
316 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
317 (KeGetCurrentIrql() > APC_LEVEL) :
318 (KeGetCurrentIrql() > DISPATCH_LEVEL))
319 {
320 //
321 // Take the system down
322 //
323 KeBugCheckEx(BAD_POOL_CALLER,
324 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
325 KeGetCurrentIrql(),
326 PoolType,
327 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
328 }
329 }
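//
// Example: a paged pool allocation attempted at DISPATCH_LEVEL lands here
// and bugchecks with BAD_POOL_CALLER / POOL_ALLOC_IRQL_INVALID -- one of
// the most common driver bugs this routine catches.
//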
330
331 FORCEINLINE
332 ULONG
333 ExpComputeHashForTag(IN ULONG Tag,
334 IN SIZE_T BucketMask)
335 {
336 //
337 // Compute the hash by multiplying with a large prime number and then XORing
338 // with the HIDWORD of the result.
339 //
340 // Finally, AND with the bucket mask to generate a valid index/bucket into
341 // the table
342 //
343 ULONGLONG Result = 40543 * Tag;
344 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
345 }
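//
// Worked example (illustrative only): with the default table,
// PoolTrackTableMask is 0x7FF, so for Tag = 'looP' the bucket is
//
//   Result = 40543ULL * 'looP';                        // 64-bit product
//   Bucket = 0x7FF & ((ULONG)Result ^ (Result >> 32));
//
// Collisions are handled by the linear probing in ExpInsertPoolTracker.
//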
346
347 FORCEINLINE
348 ULONG
349 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
350 {
351 ULONG Result;
352 //
353 // Compute the hash by converting the address into a page number, and then
354 // XORing its bytes together.
355 //
356 // We do *NOT* AND with the bucket mask at this point because big table expansion
357 // might happen. Therefore, the final step of the hash must be performed
358 // while holding the expansion pushlock, and this is why we call this a
359 // "partial" hash only.
360 //
361 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
362 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
363 }
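//
// Illustrative note: for Va = 0x8123A000 with 4KB pages, Result is the
// page number 0x8123A; the shifted XORs fold the higher bytes of that
// page number into the lower ones, and the final AND with
// PoolBigPageTableHash happens later, under the table lock.
//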
364
365 /* PRIVATE FUNCTIONS **********************************************************/
366
367 VOID
368 NTAPI
369 INIT_FUNCTION
370 ExpSeedHotTags(VOID)
371 {
372 ULONG i, Key, Hash, Index;
373 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
374 ULONG TagList[] =
375 {
376 '  oI',
377 ' laH',
378 'PldM',
379 'LooP',
380 'tSbO',
381 ' prI',
382 'bdDN',
383 'LprI',
384 'pOoI',
385 ' ldM',
386 'eliF',
387 'aVMC',
388 'dSeS',
389 'CFtN',
390 'looP',
391 'rPCT',
392 'bNMC',
393 'dTeS',
394 'sFtN',
395 'TPCT',
396 'CPCT',
397 ' yeK',
398 'qSbO',
399 'mNoI',
400 'aEoI',
401 'cPCT',
402 'aFtN',
403 '0ftN',
404 'tceS',
405 'SprI',
406 'ekoT',
407 '  eS',
408 'lCbO',
409 'cScC',
410 'lFtN',
411 'cAeS',
412 'mfSF',
413 'kWcC',
414 'miSF',
415 'CdfA',
416 'EdfA',
417 'orSF',
418 'nftN',
419 'PRIU',
420 'rFpN',
421 'RFpN',
422 'aPeS',
423 'sUeS',
424 'FpcA',
425 'MpcA',
426 'cSeS',
427 'mNbO',
428 'sFpN',
429 'uLeS',
430 'DPcS',
431 'nevE',
432 'vrqR',
433 'ldaV',
434 '  pP',
435 'SdaV',
436 ' daV',
437 'LdaV',
438 'FdaV',
439 ' GIB',
440 };
441
442 //
443 // Loop all 64 hot tags
444 //
445 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
446 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
447 {
448 //
449 // Get the current tag, and compute its hash in the tracker table
450 //
451 Key = TagList[i];
452 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
453
454 //
455 // Loop all the hashes in this index/bucket
456 //
457 Index = Hash;
458 while (TRUE)
459 {
460 //
461 // Find an empty entry, and make sure this isn't the last hash that
462 // can fit.
463 //
464 // On checked builds, also make sure this is the first time we are
465 // seeding this tag.
466 //
467 ASSERT(TrackTable[Hash].Key != Key);
468 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
469 {
470 //
471 // It has been seeded, move on to the next tag
472 //
473 TrackTable[Hash].Key = Key;
474 break;
475 }
476
477 //
478 // This entry was already taken, compute the next possible hash while
479 // making sure we're not back at our initial index.
480 //
481 ASSERT(TrackTable[Hash].Key != Key);
482 Hash = (Hash + 1) & PoolTrackTableMask;
483 if (Hash == Index) break;
484 }
485 }
486 }
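//
// Note on the reversed spelling in the list above: pool tags are stored as
// little-endian ULONGs, so a constant like 'looP' shows up as the readable
// string "Pool" in a memory dump or in tools such as poolmon.
//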
487
488 VOID
489 NTAPI
490 ExpRemovePoolTracker(IN ULONG Key,
491 IN SIZE_T NumberOfBytes,
492 IN POOL_TYPE PoolType)
493 {
494 ULONG Hash, Index;
495 PPOOL_TRACKER_TABLE Table, TableEntry;
496 SIZE_T TableMask, TableSize;
497
498 //
499 // Remove the PROTECTED_POOL flag which is not part of the tag
500 //
501 Key &= ~PROTECTED_POOL;
502
503 //
504 // With WinDBG you can set a tag you want to break on when an allocation is
505 // attempted
506 //
507 if (Key == PoolHitTag) DbgBreakPoint();
508
509 //
510 // Why the double indirection? Because normally this function is also used
511 // when doing session pool allocations, which has another set of tables,
512 // sizes, and masks that live in session pool. Now we don't support session
513 // pool so we only ever use the regular tables, but I'm keeping the code this
514 // way so that the day we DO support session pool, it won't require that
515 // many changes
516 //
517 Table = PoolTrackTable;
518 TableMask = PoolTrackTableMask;
519 TableSize = PoolTrackTableSize;
520
521 //
522 // Compute the hash for this key, and loop all the possible buckets
523 //
524 Hash = ExpComputeHashForTag(Key, TableMask);
525 Index = Hash;
526 while (TRUE)
527 {
528 //
529 // Have we found the entry for this tag?
530 //
531 TableEntry = &Table[Hash];
532 if (TableEntry->Key == Key)
533 {
534 //
535 // Decrement the counters depending on if this was paged or nonpaged
536 // pool
537 //
538 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
539 {
540 InterlockedIncrement(&TableEntry->NonPagedFrees);
541 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
542 -(SSIZE_T)NumberOfBytes);
543 return;
544 }
545 InterlockedIncrement(&TableEntry->PagedFrees);
546 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
547 -(SSIZE_T)NumberOfBytes);
548 return;
549 }
550
551 //
552 // We should have only ended up with an empty entry if we've reached
553 // the last bucket
554 //
555 if (!TableEntry->Key) ASSERT(Hash == TableMask);
556
557 //
558 // This path is hit when we don't have an entry, and the current bucket
559 // is full, so we simply try the next one
560 //
561 Hash = (Hash + 1) & TableMask;
562 if (Hash == Index) break;
563 }
564
565 //
566 // And finally this path is hit when all the buckets are full, and we need
567 // some expansion. This path is not yet supported in ReactOS and so we'll
568 // ignore the tag
569 //
570 DPRINT1("Out of pool tag space, ignoring...\n");
571 }
572
573 VOID
574 NTAPI
575 ExpInsertPoolTracker(IN ULONG Key,
576 IN SIZE_T NumberOfBytes,
577 IN POOL_TYPE PoolType)
578 {
579 ULONG Hash, Index;
580 KIRQL OldIrql;
581 PPOOL_TRACKER_TABLE Table, TableEntry;
582 SIZE_T TableMask, TableSize;
583
584 //
585 // Remove the PROTECTED_POOL flag which is not part of the tag
586 //
587 Key &= ~PROTECTED_POOL;
588
589 //
590 // With WinDBG you can set a tag you want to break on when an allocation is
591 // attempted
592 //
593 if (Key == PoolHitTag) DbgBreakPoint();
594
595 //
596 // There is also an internal flag you can set to break on malformed tags
597 //
598 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
599
600 //
601 // ASSERT on ReactOS features not yet supported
602 //
603 ASSERT(!(PoolType & SESSION_POOL_MASK));
604 ASSERT(KeGetCurrentProcessorNumber() == 0);
605
606 //
607 // Why the double indirection? Because normally this function is also used
608 // when doing session pool allocations, which has another set of tables,
609 // sizes, and masks that live in session pool. Now we don't support session
610 // pool so we only ever use the regular tables, but I'm keeping the code this
611 // way so that the day we DO support session pool, it won't require that
612 // many changes
613 //
614 Table = PoolTrackTable;
615 TableMask = PoolTrackTableMask;
616 TableSize = PoolTrackTableSize;
617
618 //
619 // Compute the hash for this key, and loop all the possible buckets
620 //
621 Hash = ExpComputeHashForTag(Key, TableMask);
622 Index = Hash;
623 while (TRUE)
624 {
625 //
626 // Do we already have an entry for this tag?
627 //
628 TableEntry = &Table[Hash];
629 if (TableEntry->Key == Key)
630 {
631 //
632 // Increment the counters depending on if this was paged or nonpaged
633 // pool
634 //
635 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
636 {
637 InterlockedIncrement(&TableEntry->NonPagedAllocs);
638 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
639 return;
640 }
641 InterlockedIncrement(&TableEntry->PagedAllocs);
642 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
643 return;
644 }
645
646 //
647 // We don't have an entry yet, but we've found a free bucket for it
648 //
649 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
650 {
651 //
652 // We need to hold the lock while creating a new entry, since other
653 // processors might be in this code path as well
654 //
655 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
656 if (!PoolTrackTable[Hash].Key)
657 {
658 //
659 // We've won the race, so now create this entry in the bucket
660 //
661 ASSERT(Table[Hash].Key == 0);
662 PoolTrackTable[Hash].Key = Key;
663 TableEntry->Key = Key;
664 }
665 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
666
667 //
668 // Now we force the loop to run again, and we should now end up in
669 // the code path above which does the interlocked increments...
670 //
671 continue;
672 }
673
674 //
675 // This path is hit when we don't have an entry, and the current bucket
676 // is full, so we simply try the next one
677 //
678 Hash = (Hash + 1) & TableMask;
679 if (Hash == Index) break;
680 }
681
682 //
683 // And finally this path is hit when all the buckets are full, and we need
684 // some expansion. This path is not yet supported in ReactOS and so we'll
685 // ignore the tag
686 //
687 DPRINT1("Out of pool tag space, ignoring...\n");
688 }
689
690 VOID
691 NTAPI
692 INIT_FUNCTION
693 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
694 IN POOL_TYPE PoolType,
695 IN ULONG PoolIndex,
696 IN ULONG Threshold,
697 IN PVOID PoolLock)
698 {
699 PLIST_ENTRY NextEntry, LastEntry;
700
701 //
702 // Setup the descriptor based on the caller's request
703 //
704 PoolDescriptor->PoolType = PoolType;
705 PoolDescriptor->PoolIndex = PoolIndex;
706 PoolDescriptor->Threshold = Threshold;
707 PoolDescriptor->LockAddress = PoolLock;
708
709 //
710 // Initialize accounting data
711 //
712 PoolDescriptor->RunningAllocs = 0;
713 PoolDescriptor->RunningDeAllocs = 0;
714 PoolDescriptor->TotalPages = 0;
715 PoolDescriptor->TotalBytes = 0;
716 PoolDescriptor->TotalBigPages = 0;
717
718 //
719 // Nothing pending for now
720 //
721 PoolDescriptor->PendingFrees = NULL;
722 PoolDescriptor->PendingFreeDepth = 0;
723
724 //
725 // Loop all the descriptor's allocation lists and initialize them
726 //
727 NextEntry = PoolDescriptor->ListHeads;
728 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
729 while (NextEntry < LastEntry)
730 {
731 ExpInitializePoolListHead(NextEntry);
732 NextEntry++;
733 }
734
735 //
736 // Note that ReactOS does not support session pool yet
737 //
738 ASSERT(PoolType != PagedPoolSession);
739 }
740
741 VOID
742 NTAPI
743 INIT_FUNCTION
744 InitializePool(IN POOL_TYPE PoolType,
745 IN ULONG Threshold)
746 {
747 PPOOL_DESCRIPTOR Descriptor;
748 SIZE_T TableSize;
749 ULONG i;
750
751 //
752 // Check what kind of pool this is
753 //
754 if (PoolType == NonPagedPool)
755 {
756 //
757 // Compute the track table size and convert it from a power of two to an
758 // actual byte size
759 //
760 // NOTE: On checked builds, we'll assert if the registry table size was
761 // invalid, while on retail builds we'll just break out of the loop at
762 // that point.
763 //
764 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
765 for (i = 0; i < 32; i++)
766 {
767 if (TableSize & 1)
768 {
769 ASSERT((TableSize & ~1) == 0);
770 if (!(TableSize & ~1)) break;
771 }
772 TableSize >>= 1;
773 }
774
775 //
776 // If we hit bit 32, then no size was defined in the registry, so
777 // we'll use the default size of 2048 entries.
778 //
779 // Otherwise, use the size from the registry, as long as it's not
780 // smaller than 64 entries.
781 //
782 if (i == 32)
783 {
784 PoolTrackTableSize = 2048;
785 }
786 else
787 {
788 PoolTrackTableSize = max(1 << i, 64);
789 }
790
791 //
792 // Loop trying with the biggest specified size first, and cut it down
793 // by a power of two each iteration in case not enough memory exists
794 //
795 while (TRUE)
796 {
797 //
798 // Do not allow overflow
799 //
800 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
801 {
802 PoolTrackTableSize >>= 1;
803 continue;
804 }
805
806 //
807 // Allocate the tracker table and exit the loop if this worked
808 //
809 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
810 (PoolTrackTableSize + 1) *
811 sizeof(POOL_TRACKER_TABLE));
812 if (PoolTrackTable) break;
813
814 //
815 // Otherwise, as long as we're not down to the last bit, keep
816 // iterating
817 //
818 if (PoolTrackTableSize == 1)
819 {
820 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
821 TableSize,
822 0xFFFFFFFF,
823 0xFFFFFFFF,
824 0xFFFFFFFF);
825 }
826 PoolTrackTableSize >>= 1;
827 }
828
829 //
830 // Finally, add one entry, compute the hash, and zero the table
831 //
832 PoolTrackTableSize++;
833 PoolTrackTableMask = PoolTrackTableSize - 2;
834
835 RtlZeroMemory(PoolTrackTable,
836 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
837
838 //
839 // We now do the exact same thing with the tracker table for big pages
840 //
841 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
842 for (i = 0; i < 32; i++)
843 {
844 if (TableSize & 1)
845 {
846 ASSERT((TableSize & ~1) == 0);
847 if (!(TableSize & ~1)) break;
848 }
849 TableSize >>= 1;
850 }
851
852 //
853 // For big pages, the default tracker table is 4096 entries, while the
854 // minimum is still 64
855 //
856 if (i == 32)
857 {
858 PoolBigPageTableSize = 4096;
859 }
860 else
861 {
862 PoolBigPageTableSize = max(1 << i, 64);
863 }
864
865 //
866 // Again, run the exact same loop we ran earlier, but this time for the
867 // big pool tracker instead
868 //
869 while (TRUE)
870 {
871 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
872 {
873 PoolBigPageTableSize >>= 1;
874 continue;
875 }
876
877 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
878 PoolBigPageTableSize *
879 sizeof(POOL_TRACKER_BIG_PAGES));
880 if (PoolBigPageTable) break;
881
882 if (PoolBigPageTableSize == 1)
883 {
884 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
885 TableSize,
886 0xFFFFFFFF,
887 0xFFFFFFFF,
888 0xFFFFFFFF);
889 }
890
891 PoolBigPageTableSize >>= 1;
892 }
893
894 //
895 // An extra entry is not needed for the big pool tracker, so just
896 // compute the hash and zero it
897 //
898 PoolBigPageTableHash = PoolBigPageTableSize - 1;
899 RtlZeroMemory(PoolBigPageTable,
900 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
901 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
902
903 //
904 // During development, print this out so we can see what's happening
905 //
906 DPRINT1("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
907 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
908 DPRINT1("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
909 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
910
911 //
912 // Insert the generic tracker for all of big pool
913 //
914 ExpInsertPoolTracker('looP',
915 ROUND_TO_PAGES(PoolBigPageTableSize *
916 sizeof(POOL_TRACKER_BIG_PAGES)),
917 NonPagedPool);
918
919 //
920 // No support for NUMA systems at this time
921 //
922 ASSERT(KeNumberNodes == 1);
923
924 //
925 // Initialize the tag spinlock
926 //
927 KeInitializeSpinLock(&ExpTaggedPoolLock);
928
929 //
930 // Initialize the nonpaged pool descriptor
931 //
932 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
933 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
934 NonPagedPool,
935 0,
936 Threshold,
937 NULL);
938 }
939 else
940 {
941 //
942 // No support for NUMA systems at this time
943 //
944 ASSERT(KeNumberNodes == 1);
945
946 //
947 // Allocate the pool descriptor
948 //
949 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
950 sizeof(KGUARDED_MUTEX) +
951 sizeof(POOL_DESCRIPTOR),
952 'looP');
953 if (!Descriptor)
954 {
955 //
956 // This is really bad...
957 //
958 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
959 0,
960 -1,
961 -1,
962 -1);
963 }
964
965 //
966 // Setup the vector and guarded mutex for paged pool
967 //
968 PoolVector[PagedPool] = Descriptor;
969 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
970 ExpPagedPoolDescriptor[0] = Descriptor;
971 KeInitializeGuardedMutex(ExpPagedPoolMutex);
972 ExInitializePoolDescriptor(Descriptor,
973 PagedPool,
974 0,
975 Threshold,
976 ExpPagedPoolMutex);
977
978 //
979 // Insert the generic tracker for all of nonpaged pool
980 //
981 ExpInsertPoolTracker('looP',
982 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
983 NonPagedPool);
984 }
985 }
986
987 FORCEINLINE
988 KIRQL
989 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
990 {
991 //
992 // Check if this is nonpaged pool
993 //
994 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
995 {
996 //
997 // Use the queued spin lock
998 //
999 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1000 }
1001 else
1002 {
1003 //
1004 // Use the guarded mutex
1005 //
1006 KeAcquireGuardedMutex(Descriptor->LockAddress);
1007 return APC_LEVEL;
1008 }
1009 }
1010
1011 FORCEINLINE
1012 VOID
1013 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1014 IN KIRQL OldIrql)
1015 {
1016 //
1017 // Check if this is nonpaged pool
1018 //
1019 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1020 {
1021 //
1022 // Use the queued spin lock
1023 //
1024 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1025 }
1026 else
1027 {
1028 //
1029 // Use the guarded mutex
1030 //
1031 KeReleaseGuardedMutex(Descriptor->LockAddress);
1032 }
1033 }
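//
// The two routines above are always used as a bracket around free-list
// manipulation, e.g.:
//
//   OldIrql = ExLockPool(PoolDesc);
//   /* ... touch PoolDesc->ListHeads ... */
//   ExUnlockPool(PoolDesc, OldIrql);
//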
1034
1035 VOID
1036 NTAPI
1037 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1038 IN PVOID DeferredContext,
1039 IN PVOID SystemArgument1,
1040 IN PVOID SystemArgument2)
1041 {
1042 PPOOL_DPC_CONTEXT Context = DeferredContext;
1043 UNREFERENCED_PARAMETER(Dpc);
1044 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1045
1046 //
1047 // Make sure we win the race, and if we did, copy the data atomically
1048 //
1049 if (KeSignalCallDpcSynchronize(SystemArgument2))
1050 {
1051 RtlCopyMemory(Context->PoolTrackTable,
1052 PoolTrackTable,
1053 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1054
1055 //
1056 // This is here because ReactOS does not yet support expansion
1057 //
1058 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1059 }
1060
1061 //
1062 // Regardless of whether we won or not, we must now synchronize and then
1063 // decrement the barrier since this is one more processor that has completed
1064 // the callback.
1065 //
1066 KeSignalCallDpcSynchronize(SystemArgument2);
1067 KeSignalCallDpcDone(SystemArgument1);
1068 }
1069
1070 NTSTATUS
1071 NTAPI
1072 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1073 IN ULONG SystemInformationLength,
1074 IN OUT PULONG ReturnLength OPTIONAL)
1075 {
1076 ULONG TableSize, CurrentLength;
1077 ULONG EntryCount;
1078 NTSTATUS Status = STATUS_SUCCESS;
1079 PSYSTEM_POOLTAG TagEntry;
1080 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1081 POOL_DPC_CONTEXT Context;
1082 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1083
1084 //
1085 // Keep track of how much data the caller's buffer must hold
1086 //
1087 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1088
1089 //
1090 // Initialize the caller's buffer
1091 //
1092 TagEntry = &SystemInformation->TagInfo[0];
1093 SystemInformation->Count = 0;
1094
1095 //
1096 // Capture the number of entries, and the total size needed to make a copy
1097 // of the table
1098 //
1099 EntryCount = (ULONG)PoolTrackTableSize;
1100 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1101
1102 //
1103 // Allocate the "Generic DPC" temporary buffer
1104 //
1105 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1106 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1107
1108 //
1109 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1110 //
1111 Context.PoolTrackTable = Buffer;
1112 Context.PoolTrackTableSize = PoolTrackTableSize;
1113 Context.PoolTrackTableExpansion = NULL;
1114 Context.PoolTrackTableSizeExpansion = 0;
1115 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1116
1117 //
1118 // Now parse the results
1119 //
1120 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1121 {
1122 //
1123 // If the entry is empty, skip it
1124 //
1125 if (!TrackerEntry->Key) continue;
1126
1127 //
1128 // Otherwise, add one more entry to the caller's buffer, and ensure that
1129 // enough space has been allocated in it
1130 //
1131 SystemInformation->Count++;
1132 CurrentLength += sizeof(*TagEntry);
1133 if (SystemInformationLength < CurrentLength)
1134 {
1135 //
1136 // The caller's buffer is too small, so set a failure code. The
1137 // caller will know the count, as well as how much space is needed.
1138 //
1139 // We do NOT break out of the loop, because we want to keep incrementing
1140 // the Count as well as CurrentLength so that the caller can know the
1141 // final numbers
1142 //
1143 Status = STATUS_INFO_LENGTH_MISMATCH;
1144 }
1145 else
1146 {
1147 //
1148 // Small sanity check that our accounting is working correctly
1149 //
1150 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1151 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1152
1153 //
1154 // Return the data into the caller's buffer
1155 //
1156 TagEntry->TagUlong = TrackerEntry->Key;
1157 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1158 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1159 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1160 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1161 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1162 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1163 TagEntry++;
1164 }
1165 }
1166
1167 //
1168 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1169 //
1170 ExFreePool(Buffer);
1171 if (ReturnLength) *ReturnLength = CurrentLength;
1172 return Status;
1173 }
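//
// This routine backs the SystemPoolTagInformation class of
// NtQuerySystemInformation, which is what tools such as poolmon use to
// display per-tag allocation statistics.
//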
1174
1175 BOOLEAN
1176 NTAPI
1177 ExpAddTagForBigPages(IN PVOID Va,
1178 IN ULONG Key,
1179 IN ULONG NumberOfPages,
1180 IN POOL_TYPE PoolType)
1181 {
1182 ULONG Hash, i = 0;
1183 PVOID OldVa;
1184 KIRQL OldIrql;
1185 SIZE_T TableSize;
1186 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1187 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1188 ASSERT(!(PoolType & SESSION_POOL_MASK));
1189
1190 //
1191 // As the table is expandable, these values must only be read after acquiring
1192 // the lock to avoid a torn access during an expansion
1193 //
1194 Hash = ExpComputePartialHashForAddress(Va);
1195 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1196 Hash &= PoolBigPageTableHash;
1197 TableSize = PoolBigPageTableSize;
1198
1199 //
1200 // We loop from the current hash bucket to the end of the table, and then
1201 // rollover to hash bucket 0 and keep going from there. If we return back
1202 // to the beginning, then we attempt expansion at the bottom of the loop
1203 //
1204 EntryStart = Entry = &PoolBigPageTable[Hash];
1205 EntryEnd = &PoolBigPageTable[TableSize];
1206 do
1207 {
1208 //
1209 // Make sure that this is a free entry and attempt to atomically make the
1210 // entry busy now
1211 //
1212 OldVa = Entry->Va;
1213 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1214 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1215 {
1216 //
1217 // We now own this entry, write down the size and the pool tag
1218 //
1219 Entry->Key = Key;
1220 Entry->NumberOfPages = NumberOfPages;
1221
1222 //
1223 // Add one more entry to the count, and see if we're getting within
1224 // 25% of the table size, at which point we'll do an expansion now
1225 // to avoid blocking too hard later on.
1226 //
1227 // Note that we only do this if we have also lost the race (or failed
1228 // to find a free entry) at least 16 times already, which implies a
1229 // massive number of concurrent big pool allocations.
1230 //
1231 InterlockedIncrement(&ExpPoolBigEntriesInUse);
1232 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1233 {
1234 DPRINT1("Should attempt expansion since we now have %d entries\n",
1235 ExpPoolBigEntriesInUse);
1236 }
1237
1238 //
1239 // We have our entry, return
1240 //
1241 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1242 return TRUE;
1243 }
1244
1245 //
1246 // We don't have our entry yet, so keep trying, making the entry list
1247 // circular if we reach the last entry. We'll eventually break out of
1248 // the loop once we've rolled over and returned back to our original
1249 // hash bucket
1250 //
1251 i++;
1252 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1253 } while (Entry != EntryStart);
1254
1255 //
1256 // This means there's no free hash buckets whatsoever, so we would now have
1257 // to attempt expanding the table
1258 //
1259 DPRINT1("Big pool expansion needed, not implemented!\n");
1260 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1261 return FALSE;
1262 }
1263
1264 ULONG
1265 NTAPI
1266 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1267 OUT PULONG_PTR BigPages,
1268 IN POOL_TYPE PoolType)
1269 {
1270 BOOLEAN FirstTry = TRUE;
1271 SIZE_T TableSize;
1272 KIRQL OldIrql;
1273 ULONG PoolTag, Hash;
1274 PPOOL_TRACKER_BIG_PAGES Entry;
1275 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1276 ASSERT(!(PoolType & SESSION_POOL_MASK));
1277
1278 //
1279 // As the table is expandable, these values must only be read after acquiring
1280 // the lock to avoid a torn access during an expansion
1281 //
1282 Hash = ExpComputePartialHashForAddress(Va);
1283 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1284 Hash &= PoolBigPageTableHash;
1285 TableSize = PoolBigPageTableSize;
1286
1287 //
1288 // Loop while trying to find this big page allocation
1289 //
1290 while (PoolBigPageTable[Hash].Va != Va)
1291 {
1292 //
1293 // Increment the size until we go past the end of the table
1294 //
1295 if (++Hash >= TableSize)
1296 {
1297 //
1298 // Is this the second time we've tried?
1299 //
1300 if (!FirstTry)
1301 {
1302 //
1303 // This means it was never inserted into the pool table and it
1304 // received the special "BIG" tag -- return that and return 0
1305 // so that the code can ask Mm for the page count instead
1306 //
1307 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1308 *BigPages = 0;
1309 return ' GIB';
1310 }
1311
1312 //
1313 // The first time this happens, reset the hash index and try again
1314 //
1315 Hash = 0;
1316 FirstTry = FALSE;
1317 }
1318 }
1319
1320 //
1321 // Now capture all the information we need from the entry, since after we
1322 // release the lock, the data can change
1323 //
1324 Entry = &PoolBigPageTable[Hash];
1325 *BigPages = Entry->NumberOfPages;
1326 PoolTag = Entry->Key;
1327
1328 //
1329 // Set the free bit, and decrement the number of allocations. Finally, release
1330 // the lock and return the tag that was located
1331 //
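// (Entry->Va is page-aligned, so its bottom bit is clear; the interlocked
// increment below atomically sets POOL_BIG_TABLE_ENTRY_FREE in place.)
//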
1332 InterlockedIncrement((PLONG)&Entry->Va);
1333 InterlockedDecrement(&ExpPoolBigEntriesInUse);
1334 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1335 return PoolTag;
1336 }
1337
1338 VOID
1339 NTAPI
1340 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1341 OUT PULONG NonPagedPoolPages,
1342 OUT PULONG PagedPoolAllocs,
1343 OUT PULONG PagedPoolFrees,
1344 OUT PULONG PagedPoolLookasideHits,
1345 OUT PULONG NonPagedPoolAllocs,
1346 OUT PULONG NonPagedPoolFrees,
1347 OUT PULONG NonPagedPoolLookasideHits)
1348 {
1349 ULONG i;
1350 PPOOL_DESCRIPTOR PoolDesc;
1351
1352 //
1353 // Assume all failures
1354 //
1355 *PagedPoolPages = 0;
1356 *PagedPoolAllocs = 0;
1357 *PagedPoolFrees = 0;
1358
1359 //
1360 // Tally up the totals for all the paged pools
1361 //
1362 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1363 {
1364 PoolDesc = ExpPagedPoolDescriptor[i];
1365 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1366 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1367 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1368 }
1369
1370 //
1371 // The first non-paged pool has a hardcoded well-known descriptor name
1372 //
1373 PoolDesc = &NonPagedPoolDescriptor;
1374 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1375 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1376 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1377
1378 //
1379 // If the system has more than one non-paged pool, copy the other descriptor
1380 // totals as well
1381 //
1382 #if 0
1383 if (ExpNumberOfNonPagedPools > 1)
1384 {
1385 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1386 {
1387 PoolDesc = ExpNonPagedPoolDescriptor[i];
1388 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1389 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1390 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1391 }
1392 }
1393 #endif
1394
1395 //
1396 // FIXME: Not yet supported
1397 //
1398 *NonPagedPoolLookasideHits += 0;
1399 *PagedPoolLookasideHits += 0;
1400 }
1401
1402 /* PUBLIC FUNCTIONS ***********************************************************/
1403
1404 /*
1405 * @implemented
1406 */
1407 PVOID
1408 NTAPI
1409 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1410 IN SIZE_T NumberOfBytes,
1411 IN ULONG Tag)
1412 {
1413 PPOOL_DESCRIPTOR PoolDesc;
1414 PLIST_ENTRY ListHead;
1415 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1416 KIRQL OldIrql;
1417 USHORT BlockSize, i;
1418 ULONG OriginalType;
1419 PKPRCB Prcb = KeGetCurrentPrcb();
1420 PGENERAL_LOOKASIDE LookasideList;
1421
1422 //
1423 // Some sanity checks
1424 //
1425 ASSERT(Tag != 0);
1426 ASSERT(Tag != ' GIB');
1427 ASSERT(NumberOfBytes != 0);
1428 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1429
1430 //
1431 // Not supported in ReactOS
1432 //
1433 ASSERT(!(PoolType & SESSION_POOL_MASK));
1434
1435 //
1436 // Check if verifier or special pool is enabled
1437 //
1438 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1439 {
1440 //
1441 // For verifier, we should call the verification routine
1442 //
1443 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1444 {
1445 DPRINT1("Driver Verifier is not yet supported\n");
1446 }
1447
1448 //
1449 // For special pool, we check if this is a suitable allocation and do
1450 // the special allocation if needed
1451 //
1452 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1453 {
1454 //
1455 // Check if this is a special pool allocation
1456 //
1457 if (MmUseSpecialPool(NumberOfBytes, Tag))
1458 {
1459 //
1460 // Try to allocate using special pool
1461 //
1462 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1463 if (Entry) return Entry;
1464 }
1465 }
1466 }
1467
1468 //
1469 // Get the pool type and its corresponding vector for this request
1470 //
1471 OriginalType = PoolType;
1472 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1473 PoolDesc = PoolVector[PoolType];
1474 ASSERT(PoolDesc != NULL);
1475
1476 //
1477 // Check if this is a big page allocation
1478 //
1479 if (NumberOfBytes > POOL_MAX_ALLOC)
1480 {
1481 //
1482 // Allocate pages for it
1483 //
1484 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1485 if (!Entry)
1486 {
1487 //
1488 // Must succeed pool is deprecated, but still supported. These allocation
1489 // failures must cause an immediate bugcheck
1490 //
1491 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1492 {
1493 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1494 NumberOfBytes,
1495 NonPagedPoolDescriptor.TotalPages,
1496 NonPagedPoolDescriptor.TotalBigPages,
1497 0);
1498 }
1499
1500 //
1501 // Internal debugging
1502 //
1503 ExPoolFailures++;
1504
1505 //
1506 // This flag requests printing failures, and can also further specify
1507 // breaking on failures
1508 //
1509 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1510 {
1511 DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
1512 NumberOfBytes,
1513 OriginalType);
1514 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1515 }
1516
1517 //
1518 // Finally, this flag requests an exception, which we are more than
1519 // happy to raise!
1520 //
1521 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1522 {
1523 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1524 }
1525 }
1526
1527 //
1528 // Increment required counters
1529 //
1530 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1531 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1532 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1533 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1534
1535 //
1536 // Add a tag for the big page allocation and switch to the generic "BIG"
1537 // tag if we failed to do so, then insert a tracker for this allocation.
1538 //
1539 if (!ExpAddTagForBigPages(Entry,
1540 Tag,
1541 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1542 OriginalType))
1543 {
1544 Tag = ' GIB';
1545 }
1546 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1547 return Entry;
1548 }
1549
1550 //
1551 // Should never request 0 bytes from the pool, but since so many drivers do
1552 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1553 //
1554 if (!NumberOfBytes) NumberOfBytes = 1;
1555
1556 //
1557 // A pool allocation is defined by its data, a linked list to connect it to
1558 // the free list (if necessary), and a pool header to store accounting info.
1559 // Calculate this size, then convert it into a block size (units of pool
1560 // headers)
1561 //
1562 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1563 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1564 // the direct allocation of pages.
1565 //
1566 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1567 / POOL_BLOCK_SIZE);
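//
// Worked example (illustrative only): on x86, where POOL_HEADER and
// POOL_BLOCK_SIZE are both 8 bytes, a 100-byte request yields
// i = (100 + 8 + 7) / 8 = 14 blocks, i.e. a 112-byte chunk including
// the header.
//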
1568
1569 //
1570 // Handle lookaside list optimization for both paged and nonpaged pool
1571 //
1572 if (i <= MAXIMUM_PROCESSORS)
1573 {
1574 //
1575 // Try popping it from the per-CPU lookaside list
1576 //
1577 LookasideList = (PoolType == PagedPool) ?
1578 Prcb->PPPagedLookasideList[i - 1].P :
1579 Prcb->PPNPagedLookasideList[i - 1].P;
1580 LookasideList->TotalAllocates++;
1581 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1582 if (!Entry)
1583 {
1584 //
1585 // We failed, try popping it from the global list
1586 //
1587 LookasideList = (PoolType == PagedPool) ?
1588 Prcb->PPPagedLookasideList[i - 1].L :
1589 Prcb->PPNPagedLookasideList[i - 1].L;
1590 LookasideList->TotalAllocates++;
1591 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1592 }
1593
1594 //
1595 // If we were able to pop it, update the accounting and return the block
1596 //
1597 if (Entry)
1598 {
1599 LookasideList->AllocateHits++;
1600
1601 //
1602 // Get the real entry, write down its pool type, and track it
1603 //
1604 Entry--;
1605 Entry->PoolType = PoolType + 1;
1606 ExpInsertPoolTracker(Tag,
1607 Entry->BlockSize * POOL_BLOCK_SIZE,
1608 OriginalType);
1609
1610 //
1611 // Return the pool allocation
1612 //
1613 Entry->PoolTag = Tag;
1614 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1615 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1616 return POOL_FREE_BLOCK(Entry);
1617 }
1618 }
1619
1620 //
1621 // Loop in the free lists looking for a block of this size. Start with the
1622 // list optimized for this kind of size lookup
1623 //
1624 ListHead = &PoolDesc->ListHeads[i];
1625 do
1626 {
1627 //
1628 // Are there any free entries available on this list?
1629 //
1630 if (!ExpIsPoolListEmpty(ListHead))
1631 {
1632 //
1633 // Acquire the pool lock now
1634 //
1635 OldIrql = ExLockPool(PoolDesc);
1636
1637 //
1638 // And make sure the list still has entries
1639 //
1640 if (ExpIsPoolListEmpty(ListHead))
1641 {
1642 //
1643 // Someone raced us (and won) before we had a chance to acquire
1644 // the lock.
1645 //
1646 // Try again!
1647 //
1648 ExUnlockPool(PoolDesc, OldIrql);
1649 ListHead++;
1650 continue;
1651 }
1652
1653 //
1654 // Remove a free entry from the list
1655 // Note that due to the way we insert free blocks into multiple lists
1656 // there is a guarantee that any block on this list will either be
1657 // of the correct size, or perhaps larger.
1658 //
1659 ExpCheckPoolLinks(ListHead);
1660 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1661 ExpCheckPoolLinks(ListHead);
1662 ExpCheckPoolBlocks(Entry);
1663 ASSERT(Entry->BlockSize >= i);
1664 ASSERT(Entry->PoolType == 0);
1665
1666 //
1667 // Check if this block is larger than what we need. The block could
1668 // not possibly be smaller, due to the reason explained above (and
1669 // we would've asserted on a checked build if this was the case).
1670 //
1671 if (Entry->BlockSize != i)
1672 {
1673 //
1674 // Is there an entry before this one?
1675 //
1676 if (Entry->PreviousSize == 0)
1677 {
1678 //
1679 // There isn't anyone before us, so take the next block and
1680 // turn it into a fragment that contains the leftover data
1681 // that we don't need to satisfy the caller's request
1682 //
1683 FragmentEntry = POOL_BLOCK(Entry, i);
1684 FragmentEntry->BlockSize = Entry->BlockSize - i;
1685
1686 //
1687 // And make it point back to us
1688 //
1689 FragmentEntry->PreviousSize = i;
1690
1691 //
1692 // Now get the block that follows the new fragment and check
1693 // if it's still on the same page as us (and not at the end)
1694 //
1695 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
1696 if (PAGE_ALIGN(NextEntry) != NextEntry)
1697 {
1698 //
1699 // Adjust this next block to point to our newly created
1700 // fragment block
1701 //
1702 NextEntry->PreviousSize = FragmentEntry->BlockSize;
1703 }
1704 }
1705 else
1706 {
1707 //
1708 // There is a free entry before us, which we know is smaller
1709 // so we'll make this entry the fragment instead
1710 //
1711 FragmentEntry = Entry;
1712
1713 //
1714 // And then we'll remove from it the actual size required.
1715 // Now the entry is a leftover free fragment
1716 //
1717 Entry->BlockSize -= i;
1718
1719 //
1720 // Now let's go to the next entry after the fragment (which
1721 // used to point to our original free entry) and make it
1722 // reference the new fragment entry instead.
1723 //
1724 // This is the entry that will actually end up holding the
1725 // allocation!
1726 //
1727 Entry = POOL_NEXT_BLOCK(Entry);
1728 Entry->PreviousSize = FragmentEntry->BlockSize;
1729
1730 //
1731 // And now let's go to the entry after that one and check if
1732 // it's still on the same page, and not at the end
1733 //
1734 NextEntry = POOL_BLOCK(Entry, i);
1735 if (PAGE_ALIGN(NextEntry) != NextEntry)
1736 {
1737 //
1738 // Make it reference the allocation entry
1739 //
1740 NextEntry->PreviousSize = i;
1741 }
1742 }
1743
1744 //
1745 // Now our (allocation) entry is the right size
1746 //
1747 Entry->BlockSize = i;
1748
1749 //
1750 // And the next entry is now the free fragment which contains
1751 // the remaining difference between how big the original entry
1752 // was, and the actual size the caller needs/requested.
1753 //
1754 FragmentEntry->PoolType = 0;
1755 BlockSize = FragmentEntry->BlockSize;
1756
1757 //
1758 // Now check if enough free bytes remained for us to have a
1759 // "full" entry, which contains enough bytes for a linked list
1760 // and thus can be used for allocations (up to 8 bytes...)
1761 //
1762 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
1763 if (BlockSize != 1)
1764 {
1765 //
1766 // Insert the free entry into the free list for this size
1767 //
1768 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
1769 POOL_FREE_BLOCK(FragmentEntry));
1770 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
1771 }
1772 }
1773
1774 //
1775 // We have found an entry for this allocation, so set the pool type
1776 // and release the lock since we're done
1777 //
1778 Entry->PoolType = PoolType + 1;
1779 ExpCheckPoolBlocks(Entry);
1780 ExUnlockPool(PoolDesc, OldIrql);
1781
1782 //
1783 // Increment required counters
1784 //
1785 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1786 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1787
1788 //
1789 // Track this allocation
1790 //
1791 ExpInsertPoolTracker(Tag,
1792 Entry->BlockSize * POOL_BLOCK_SIZE,
1793 OriginalType);
1794
1795 //
1796 // Return the pool allocation
1797 //
1798 Entry->PoolTag = Tag;
1799 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1800 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1801 return POOL_FREE_BLOCK(Entry);
1802 }
1803 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
1804
1805 //
1806 // There were no free entries left, so we have to allocate a new fresh page
1807 //
1808 Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
1809 if (!Entry)
1810 {
1811 //
1812 // Must succeed pool is deprecated, but still supported. These allocation
1813 // failures must cause an immediate bugcheck
1814 //
1815 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1816 {
1817 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1818 PAGE_SIZE,
1819 NonPagedPoolDescriptor.TotalPages,
1820 NonPagedPoolDescriptor.TotalBigPages,
1821 0);
1822 }
1823
1824 //
1825 // Internal debugging
1826 //
1827 ExPoolFailures++;
1828
1829 //
1830 // This flag requests printing failures, and can also further specify
1831 // breaking on failures
1832 //
1833 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1834 {
1835 DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
1836 NumberOfBytes,
1837 OriginalType);
1838 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1839 }
1840
1841 //
1842 // Finally, this flag requests an exception, which we are more than
1843 // happy to raise!
1844 //
1845 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1846 {
1847 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1848 }
1849
1850 //
1851 // Return NULL to the caller in all other cases
1852 //
1853 return NULL;
1854 }
1855
1856 //
1857 // Setup the entry data
1858 //
1859 Entry->Ulong1 = 0;
1860 Entry->BlockSize = i;
1861 Entry->PoolType = PoolType + 1;
1862
1863 //
1864 // This page will have two entries -- one for the allocation (which we just
1865 // created above), and one for the remaining free bytes, which we're about
1866 // to create now. The free bytes are the whole page minus what was allocated
1867 // and then converted into units of block headers.
1868 //
1869 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
1870 FragmentEntry = POOL_BLOCK(Entry, i);
1871 FragmentEntry->Ulong1 = 0;
1872 FragmentEntry->BlockSize = BlockSize;
1873 FragmentEntry->PreviousSize = i;
1874
1875 //
1876 // Increment required counters
1877 //
1878 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
1879 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1880
1881 //
1882 // Now check if enough free bytes remained for us to have a "full" entry,
1883 // which contains enough bytes for a linked list and thus can be used for
1884 // allocations (up to 8 bytes...)
1885 //
1886 if (FragmentEntry->BlockSize != 1)
1887 {
1888 //
1889 // Excellent -- acquire the pool lock
1890 //
1891 OldIrql = ExLockPool(PoolDesc);
1892
1893 //
1894 // And insert the free entry into the free list for this block size
1895 //
1896 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
1897 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
1898 POOL_FREE_BLOCK(FragmentEntry));
1899 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
1900
1901 //
1902 // Release the pool lock
1903 //
1904 ExpCheckPoolBlocks(Entry);
1905 ExUnlockPool(PoolDesc, OldIrql);
1906 }
1907 else
1908 {
1909 //
1910 // Simply do a sanity check
1911 //
1912 ExpCheckPoolBlocks(Entry);
1913 }
1914
1915 //
1916 // Increment performance counters and track this allocation
1917 //
1918 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1919 ExpInsertPoolTracker(Tag,
1920 Entry->BlockSize * POOL_BLOCK_SIZE,
1921 PoolType);
1922
1923 //
1924 // And return the pool allocation
1925 //
1926 ExpCheckPoolBlocks(Entry);
1927 Entry->PoolTag = Tag;
1928 return POOL_FREE_BLOCK(Entry);
1929 }
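//
// Typical driver-side usage of the routine above (illustrative sketch;
// 'tseT' is a made-up tag, reading "Test" in a little-endian dump):
//
//   PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 128, 'tseT');
//   if (Buffer)
//   {
//       /* ... use the 128 bytes ... */
//       ExFreePoolWithTag(Buffer, 'tseT');
//   }
//
// Real drivers should pick a unique tag so poolmon can attribute their
// allocations.
//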
1930
1931 /*
1932 * @implemented
1933 */
1934 PVOID
1935 NTAPI
1936 ExAllocatePool(POOL_TYPE PoolType,
1937 SIZE_T NumberOfBytes)
1938 {
1939 //
1940 // Use a default tag of "None"
1941 //
1942 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, TAG_NONE);
1943 }
1944
1945 /*
1946 * @implemented
1947 */
1948 VOID
1949 NTAPI
1950 ExFreePoolWithTag(IN PVOID P,
1951 IN ULONG TagToFree)
1952 {
1953 PPOOL_HEADER Entry, NextEntry;
1954 USHORT BlockSize;
1955 KIRQL OldIrql;
1956 POOL_TYPE PoolType;
1957 PPOOL_DESCRIPTOR PoolDesc;
1958 ULONG Tag;
1959 BOOLEAN Combined = FALSE;
1960 PFN_NUMBER PageCount, RealPageCount;
1961 PKPRCB Prcb = KeGetCurrentPrcb();
1962 PGENERAL_LOOKASIDE LookasideList;
1963
1964 //
1965 // Check if any of the debug flags are enabled
1966 //
1967 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
1968 POOL_FLAG_CHECK_WORKERS |
1969 POOL_FLAG_CHECK_RESOURCES |
1970 POOL_FLAG_VERIFIER |
1971 POOL_FLAG_CHECK_DEADLOCK |
1972 POOL_FLAG_SPECIAL_POOL))
1973 {
1974 //
1975 // Check if special pool is enabled
1976 //
1977 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1978 {
1979 //
1980 // Check if it was allocated from a special pool
1981 //
1982 if (MmIsSpecialPoolAddress(P))
1983 {
1984 //
1985 // Was deadlock verification also enabled? We can do some extra
1986 // checks at this point
1987 //
1988 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
1989 {
1990 DPRINT1("Verifier not yet supported\n");
1991 }
1992
1993 //
1994 // It is, so handle it via special pool free routine
1995 //
1996 MmFreeSpecialPool(P);
1997 return;
1998 }
1999 }
2000
2001 //
2002 // For non-big page allocations, we'll do a bunch of checks in here
2003 //
2004 if (PAGE_ALIGN(P) != P)
2005 {
2006 //
2007 // Get the entry for this pool allocation
2008 // The pointer math here may look wrong or confusing, but it is quite right
2009 //
2010 Entry = P;
2011 Entry--;
2012
2013 //
2014 // Get the pool type
2015 //
2016 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2017
2018 //
2019 // FIXME: Many other debugging checks go here
2020 //
2021 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2022 }
2023 }
2024
2025 //
2026 // Check if this is a big page allocation
2027 //
2028 if (PAGE_ALIGN(P) == P)
2029 {
2030 //
2031 // We need to find the tag for it, so first we need to find out what
2032 // kind of allocation this was (paged or nonpaged), then we can go
2033 // ahead and try finding the tag for it. Remember to get rid of the
2034 // PROTECTED_POOL tag if it's found.
2035 //
2036 // Note that if at insertion time, we failed to add the tag for a big
2037 // pool allocation, we used a special tag called 'BIG' to identify the
2038 // allocation, and we may get this tag back. In this scenario, we must
2039 // manually get the size of the allocation by actually counting through
2040 // the PFN database.
2041 //
2042 PoolType = MmDeterminePoolType(P);
2043 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2044 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2045 if (!Tag)
2046 {
2047 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2048 ASSERT(Tag == ' GIB');
2049 PageCount = 1; // We are going to lie! This might screw up accounting?
2050 }
2051 else if (Tag & PROTECTED_POOL)
2052 {
2053 Tag &= ~PROTECTED_POOL;
2054 }
2055
2056 //
2057 // We have our tag and our page count, so we can go ahead and remove this
2058 // tracker now
2059 //
2060 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);

        //
        // Check if any of the debug flags are enabled
        //
        if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                            POOL_FLAG_CHECK_WORKERS |
                            POOL_FLAG_CHECK_RESOURCES |
                            POOL_FLAG_CHECK_DEADLOCK))
        {
            //
            // Was deadlock verification also enabled? We can do some extra
            // checks at this point
            //
            if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
            {
                DPRINT1("Verifier not yet supported\n");
            }

            //
            // FIXME: Many debugging checks go here
            //
        }

        //
        // Update counters
        //
        PoolDesc = PoolVector[PoolType];
        InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
                                    -(LONG_PTR)(PageCount << PAGE_SHIFT));

        //
        // Do the real free now and update the last counter with the big page count
        //
        RealPageCount = MiFreePoolPages(P);
        ASSERT(RealPageCount == PageCount);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               -(LONG)RealPageCount);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Make sure that the IRQL makes sense
    //
    ExpCheckPoolIrqlLevel(PoolType, 0, P);

    //
    // Get the pool tag and get rid of the PROTECTED_POOL flag
    //
    Tag = Entry->PoolTag;
    if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;

    //
    // Stop tracking this allocation
    //
    ExpRemovePoolTracker(Tag,
                         BlockSize * POOL_BLOCK_SIZE,
                         Entry->PoolType - 1);
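
    //
    // BlockSize is in units of POOL_BLOCK_SIZE (sizeof(POOL_HEADER)), so,
    // e.g., with 8-byte headers a BlockSize of 4 accounts for 32 bytes,
    // header included.
    //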

    //
    // Is this allocation small enough to have come from a lookaside list?
    //
    if (BlockSize <= MAXIMUM_PROCESSORS)
    {
        //
        // Try pushing it into the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].P :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].P;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }

        //
        // We failed, so try to push it into the global lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].L :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].L;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }
    }
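
    //
    // Summary of the scheme above: each processor has private per-size
    // lookaside lists (the .P pointer) backed by shared system-wide lists
    // (the .L pointer). A free tries the per-CPU list first, then the shared
    // one; once both are at their Depth limit, the block falls through to
    // the regular free-list path below. Note the size check uses
    // MAXIMUM_PROCESSORS, which appears to coincide with the number of
    // per-size lookaside lists on this build.
    //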

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);
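
    //
    // POOL_BLOCK advances BlockSize * POOL_BLOCK_SIZE bytes, landing on the
    // header of the adjacent block. Small-pool blocks never straddle a page,
    // so if that address is page-aligned, this block is the last on its page.
    //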

    //
    // Update performance counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
                                -(LONG_PTR)(BlockSize * POOL_BLOCK_SIZE));

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check the block tag
    //
    if (TagToFree && TagToFree != Entry->PoolTag)
    {
        DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n",
                (char*)&TagToFree, (char*)&Entry->PoolTag);
        KeBugCheckEx(BAD_POOL_CALLER,
                     0x0A,
                     (ULONG_PTR)P,
                     Entry->PoolTag,
                     TagToFree);
    }

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block) into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }
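
    //
    // Worked example of the coalescing above (a sketch, 8-byte blocks):
    // a page laid out as [Prev: free, 8 blocks][Entry: being freed, 4 blocks]
    // [Next: free, 3 blocks] first absorbs Next (Entry->BlockSize becomes 7),
    // then is absorbed by Prev (Prev->BlockSize becomes 15), and Entry is
    // moved back to Prev, so the rest of the routine sees one 15-block run.
    //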

    //
    // By now, our combined blocks may have made up a full page (if there
    // were only 2-3 allocations on the page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, update the performance counter,
        // and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);
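
    //
    // A BlockSize of 1 would be a bare header with no body, which could not
    // hold the LIST_ENTRY that links free blocks -- hence the assertion.
    //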

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the block that now follows our combined block
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as that block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }
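
    //
    // For example, if two 4-block runs merged into one 8-block run, the block
    // after them must now record PreviousSize == 8; otherwise POOL_PREV_BLOCK
    // math from that block would land in the middle of the run instead of on
    // our header.
    //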

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
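
    //
    // The free lists are bucketed by size: a block of BlockSize n lives on
    // ListHeads[n - 1], so, e.g., a 4-block run goes onto ListHeads[3].
    //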
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}
/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented; report no quota charge and a zero size rather than
    // leaving the output parameter uninitialized
    //
    UNIMPLEMENTED;
    *QuotaCharged = FALSE;
    return 0;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool with the default "None" tag
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
}

/*
 * @unimplemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // Priority is not honored yet; fall back to a plain tagged allocation
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/*
 * @unimplemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    //
    // Quota is not charged yet; fall back to a plain tagged allocation
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/* EOF */