1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "../ARM3/miarm.h"
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 LONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
56
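//
// NOTE: throughout this file, the BlockSize and PreviousSize fields of a
// POOL_HEADER are expressed in units of POOL_BLOCK_SIZE (typically 8 bytes
// on 32-bit builds and 16 bytes on 64-bit builds), not in bytes, which is
// why the macros above scale by POOL_BLOCK_SIZE. As the allocation and free
// paths below suggest, PoolType is stored as the requested pool type plus
// one, so that a PoolType of zero can mark a free block.
//
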
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
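//
// The two helpers below implement the encoding used by all of these list
// operations: a link stored in a pool free list always has its lowest bit
// set and must be decoded before it is dereferenced. Since a genuine
// LIST_ENTRY in the pool is always pointer-aligned, any code that follows a
// stored link without decoding it ends up with an obviously misaligned
// pointer, which appears to be the point of the scheme: accidental (or
// malicious) use of raw free-list links becomes easy to catch.
//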
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
86
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 KeBugCheckEx(BAD_POOL_HEADER,
209 5,
210 (ULONG_PTR)PreviousEntry,
211 __LINE__,
212 (ULONG_PTR)Entry);
213 }
214 }
215 else if (PAGE_ALIGN(Entry) != Entry)
216 {
217 /* If there's no block before us, we are the first block, so we should be on a page boundary */
218 KeBugCheckEx(BAD_POOL_HEADER,
219 7,
220 0,
221 __LINE__,
222 (ULONG_PTR)Entry);
223 }
224
225 /* This block must have a size */
226 if (!Entry->BlockSize)
227 {
228 /* Someone must've corrupted this field */
229 KeBugCheckEx(BAD_POOL_HEADER,
230 8,
231 0,
232 __LINE__,
233 (ULONG_PTR)Entry);
234 }
235
236 /* Okay, now get the next block */
237 NextEntry = POOL_NEXT_BLOCK(Entry);
238
239 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
240 if (PAGE_ALIGN(NextEntry) != NextEntry)
241 {
242 /* The two blocks must be on the same page! */
243 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
244 {
245 /* Something is messed up */
246 KeBugCheckEx(BAD_POOL_HEADER,
247 9,
248 (ULONG_PTR)NextEntry,
249 __LINE__,
250 (ULONG_PTR)Entry);
251 }
252
253 /* And this block should think we are as large as we truly are */
254 if (NextEntry->PreviousSize != Entry->BlockSize)
255 {
256 /* Otherwise, someone corrupted the field */
257 KeBugCheckEx(BAD_POOL_HEADER,
258 5,
259 (ULONG_PTR)NextEntry,
260 __LINE__,
261 (ULONG_PTR)Entry);
262 }
263 }
264 }
265
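//
// ExpCheckPoolBlocks walks every block on the page containing the given
// allocation. There is no separate list of the blocks within a page: the
// chain is implicit in the BlockSize/PreviousSize fields of each header, so
// the walk simply keeps adding BlockSize until it covers the whole page,
// validating each header along the way and bugchecking if the original
// block is never encountered.
//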
266 VOID
267 NTAPI
268 ExpCheckPoolBlocks(IN PVOID Block)
269 {
270 BOOLEAN FoundBlock = FALSE;
271 SIZE_T Size = 0;
272 PPOOL_HEADER Entry;
273
274 /* Get the first entry for this page, make sure it really is the first */
275 Entry = PAGE_ALIGN(Block);
276 ASSERT(Entry->PreviousSize == 0);
277
278 /* Now scan each entry */
279 while (TRUE)
280 {
281 /* When we actually found our block, remember this */
282 if (Entry == Block) FoundBlock = TRUE;
283
284 /* Now validate this block header */
285 ExpCheckPoolHeader(Entry);
286
287 /* And go to the next one, keeping track of our size */
288 Size += Entry->BlockSize;
289 Entry = POOL_NEXT_BLOCK(Entry);
290
291 /* If we hit the last block, stop */
292 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
293
294 /* If we hit the end of the page, stop */
295 if (PAGE_ALIGN(Entry) == Entry) break;
296 }
297
298 /* We must've found our block, and we must have hit the end of the page */
299 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
300 {
301 /* Otherwise, the blocks are messed up */
302 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
303 }
304 }
305
306 FORCEINLINE
307 VOID
308 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
309 IN ULONG NumberOfBytes,
310 IN PVOID Entry)
311 {
312 //
313 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
314 // be DISPATCH_LEVEL or lower for Non Paged Pool
315 //
316 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
317 (KeGetCurrentIrql() > APC_LEVEL) :
318 (KeGetCurrentIrql() > DISPATCH_LEVEL))
319 {
320 //
321 // Take the system down
322 //
323 KeBugCheckEx(BAD_POOL_CALLER,
324 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
325 KeGetCurrentIrql(),
326 PoolType,
327 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
328 }
329 }
330
331 FORCEINLINE
332 ULONG
333 ExpComputeHashForTag(IN ULONG Tag,
334 IN SIZE_T BucketMask)
335 {
336 //
337 // Compute the hash by multiplying with a large prime number and then XORing
338 // with the HIDWORD of the result.
339 //
340 // Finally, AND with the bucket mask to generate a valid index/bucket into
341 // the table
342 //
343 ULONGLONG Result = (ULONGLONG)40543 * Tag;
344 return BucketMask & (Result ^ (Result >> 32));
345 }
346
347 FORCEINLINE
348 ULONG
349 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
350 {
351 ULONG Result;
352 //
353 // Compute the hash by converting the address into a page number, and then
354 // XORing each nibble with the next one.
355 //
356 // We do *NOT* AND with the bucket mask at this point because big table expansion
357 // might happen. Therefore, the final step of the hash must be performed
358 // while holding the expansion pushlock, and this is why we call this a
359 // "partial" hash only.
360 //
361 Result = (ULONG_PTR)BaseAddress >> PAGE_SHIFT;
362 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
363 }
364
365 /* PRIVATE FUNCTIONS **********************************************************/
366
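//
// ExpSeedHotTags pre-populates the pool tracker table at boot with a fixed
// list of tags that the system is expected to use early and often, presumably
// so that these "hot" tags claim a bucket at (or near) their natural hash
// index before the table starts filling up with driver tags.
//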
367 VOID
368 NTAPI
369 INIT_FUNCTION
370 ExpSeedHotTags(VOID)
371 {
372 ULONG i, Key, Hash, Index;
373 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
374 ULONG TagList[] =
375 {
376 '  oI',
377 ' laH',
378 'PldM',
379 'LooP',
380 'tSbO',
381 ' prI',
382 'bdDN',
383 'LprI',
384 'pOoI',
385 ' ldM',
386 'eliF',
387 'aVMC',
388 'dSeS',
389 'CFtN',
390 'looP',
391 'rPCT',
392 'bNMC',
393 'dTeS',
394 'sFtN',
395 'TPCT',
396 'CPCT',
397 ' yeK',
398 'qSbO',
399 'mNoI',
400 'aEoI',
401 'cPCT',
402 'aFtN',
403 '0ftN',
404 'tceS',
405 'SprI',
406 'ekoT',
407 '  eS',
408 'lCbO',
409 'cScC',
410 'lFtN',
411 'cAeS',
412 'mfSF',
413 'kWcC',
414 'miSF',
415 'CdfA',
416 'EdfA',
417 'orSF',
418 'nftN',
419 'PRIU',
420 'rFpN',
421 'RFpN',
422 'aPeS',
423 'sUeS',
424 'FpcA',
425 'MpcA',
426 'cSeS',
427 'mNbO',
428 'sFpN',
429 'uLeS',
430 'DPcS',
431 'nevE',
432 'vrqR',
433 'ldaV',
434 '  pP',
435 'SdaV',
436 ' daV',
437 'LdaV',
438 'FdaV',
439 ' GIB',
440 };
441
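//
// Note that the tags above are written as reversed character constants: a
// multi-character constant such as 'looP' is stored little-endian, so its
// bytes appear in memory (and in pool dumps) as "Pool". The same convention
// is used for every pool tag in this file.
//
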
442 //
443 // Loop all 64 hot tags
444 //
445 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
446 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
447 {
448 //
449 // Get the current tag, and compute its hash in the tracker table
450 //
451 Key = TagList[i];
452 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
453
454 //
455 // Loop all the hashes in this index/bucket
456 //
457 Index = Hash;
458 while (TRUE)
459 {
460 //
461 // Find an empty entry, and make sure this isn't the last hash that
462 // can fit.
463 //
464 // On checked builds, also make sure this is the first time we are
465 // seeding this tag.
466 //
467 ASSERT(TrackTable[Hash].Key != Key);
468 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
469 {
470 //
471 // It has been seeded, move on to the next tag
472 //
473 TrackTable[Hash].Key = Key;
474 break;
475 }
476
477 //
478 // This entry was already taken, compute the next possible hash while
479 // making sure we're not back at our initial index.
480 //
481 ASSERT(TrackTable[Hash].Key != Key);
482 Hash = (Hash + 1) & PoolTrackTableMask;
483 if (Hash == Index) break;
484 }
485 }
486 }
487
488 VOID
489 NTAPI
490 ExpRemovePoolTracker(IN ULONG Key,
491 IN SIZE_T NumberOfBytes,
492 IN POOL_TYPE PoolType)
493 {
494 ULONG Hash, Index;
495 PPOOL_TRACKER_TABLE Table, TableEntry;
496 SIZE_T TableMask, TableSize;
497
498 //
499 // Remove the PROTECTED_POOL flag which is not part of the tag
500 //
501 Key &= ~PROTECTED_POOL;
502
503 //
504 // With WinDBG you can set a tag you want to break on when an allocation is
505 // attempted
506 //
507 if (Key == PoolHitTag) DbgBreakPoint();
508
509 //
510 // Why the double indirection? Because normally this function is also used
511 // when doing session pool allocations, which has another set of tables,
512 // sizes, and masks that live in session pool. Now we don't support session
513 // pool so we only ever use the regular tables, but I'm keeping the code this
514 // way so that the day we DO support session pool, it won't require that
515 // many changes
516 //
517 Table = PoolTrackTable;
518 TableMask = PoolTrackTableMask;
519 TableSize = PoolTrackTableSize;
520
521 //
522 // Compute the hash for this key, and loop all the possible buckets
523 //
524 Hash = ExpComputeHashForTag(Key, TableMask);
525 Index = Hash;
526 while (TRUE)
527 {
528 //
529 // Have we found the entry for this tag?
530 //
531 TableEntry = &Table[Hash];
532 if (TableEntry->Key == Key)
533 {
534 //
535 // Decrement the counters depending on if this was paged or nonpaged
536 // pool
537 //
538 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
539 {
540 InterlockedIncrement(&TableEntry->NonPagedFrees);
541 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, -NumberOfBytes);
542 return;
543 }
544 InterlockedIncrement(&TableEntry->PagedFrees);
545 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, -NumberOfBytes);
546 return;
547 }
548
549 //
550 // We should have only ended up with an empty entry if we've reached
551 // the last bucket
552 //
553 if (!TableEntry->Key) ASSERT(Hash == TableMask);
554
555 //
556 // This path is hit when we don't have an entry, and the current bucket
557 // is full, so we simply try the next one
558 //
559 Hash = (Hash + 1) & TableMask;
560 if (Hash == Index) break;
561 }
562
563 //
564 // And finally this path is hit when all the buckets are full, and we need
565 // some expansion. This path is not yet supported in ReactOS and so we'll
566 // ignore the tag
567 //
568 DPRINT1("Out of pool tag space, ignoring...\n");
569 }
570
571 VOID
572 NTAPI
573 ExpInsertPoolTracker(IN ULONG Key,
574 IN SIZE_T NumberOfBytes,
575 IN POOL_TYPE PoolType)
576 {
577 ULONG Hash, Index;
578 KIRQL OldIrql;
579 PPOOL_TRACKER_TABLE Table, TableEntry;
580 SIZE_T TableMask, TableSize;
581
582 //
583 // Remove the PROTECTED_POOL flag which is not part of the tag
584 //
585 Key &= ~PROTECTED_POOL;
586
587 //
588 // With WinDBG you can set a tag you want to break on when an allocation is
589 // attempted
590 //
591 if (Key == PoolHitTag) DbgBreakPoint();
592
593 //
594 // There is also an internal flag you can set to break on malformed tags
595 //
596 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
597
598 //
599 // ASSERT on ReactOS features not yet supported
600 //
601 ASSERT(!(PoolType & SESSION_POOL_MASK));
602 ASSERT(KeGetCurrentProcessorNumber() == 0);
603
604 //
605 // Why the double indirection? Because normally this function is also used
606 // when doing session pool allocations, which has another set of tables,
607 // sizes, and masks that live in session pool. Now we don't support session
608 // pool so we only ever use the regular tables, but I'm keeping the code this
609 // way so that the day we DO support session pool, it won't require that
610 // many changes
611 //
612 Table = PoolTrackTable;
613 TableMask = PoolTrackTableMask;
614 TableSize = PoolTrackTableSize;
615
616 //
617 // Compute the hash for this key, and loop all the possible buckets
618 //
619 Hash = ExpComputeHashForTag(Key, TableMask);
620 Index = Hash;
621 while (TRUE)
622 {
623 //
624 // Do we already have an entry for this tag?
625 //
626 TableEntry = &Table[Hash];
627 if (TableEntry->Key == Key)
628 {
629 //
630 // Increment the counters depending on if this was paged or nonpaged
631 // pool
632 //
633 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
634 {
635 InterlockedIncrement(&TableEntry->NonPagedAllocs);
636 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
637 return;
638 }
639 InterlockedIncrement(&TableEntry->PagedAllocs);
640 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
641 return;
642 }
643
644 //
645 // We don't have an entry yet, but we've found a free bucket for it
646 //
647 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
648 {
649 //
650 // We need to hold the lock while creating a new entry, since other
651 // processors might be in this code path as well
652 //
653 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
654 if (!PoolTrackTable[Hash].Key)
655 {
656 //
657 // We've won the race, so now create this entry in the bucket
658 //
659 ASSERT(Table[Hash].Key == 0);
660 PoolTrackTable[Hash].Key = Key;
661 TableEntry->Key = Key;
662 }
663 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
664
665 //
666 // Now we force the loop to run again, and we should now end up in
667 // the code path above which does the interlocked increments...
668 //
669 continue;
670 }
671
672 //
673 // This path is hit when we don't have an entry, and the current bucket
674 // is full, so we simply try the next one
675 //
676 Hash = (Hash + 1) & TableMask;
677 if (Hash == Index) break;
678 }
679
680 //
681 // And finally this path is hit when all the buckets are full, and we need
682 // some expansion. This path is not yet supported in ReactOS and so we'll
683 // ignore the tag
684 //
685 DPRINT1("Out of pool tag space, ignoring...\n");
686 }
687
688 VOID
689 NTAPI
690 INIT_FUNCTION
691 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
692 IN POOL_TYPE PoolType,
693 IN ULONG PoolIndex,
694 IN ULONG Threshold,
695 IN PVOID PoolLock)
696 {
697 PLIST_ENTRY NextEntry, LastEntry;
698
699 //
700 // Setup the descriptor based on the caller's request
701 //
702 PoolDescriptor->PoolType = PoolType;
703 PoolDescriptor->PoolIndex = PoolIndex;
704 PoolDescriptor->Threshold = Threshold;
705 PoolDescriptor->LockAddress = PoolLock;
706
707 //
708 // Initialize accounting data
709 //
710 PoolDescriptor->RunningAllocs = 0;
711 PoolDescriptor->RunningDeAllocs = 0;
712 PoolDescriptor->TotalPages = 0;
713 PoolDescriptor->TotalBytes = 0;
714 PoolDescriptor->TotalBigPages = 0;
715
716 //
717 // Nothing pending for now
718 //
719 PoolDescriptor->PendingFrees = NULL;
720 PoolDescriptor->PendingFreeDepth = 0;
721
722 //
723 // Loop all the descriptor's allocation lists and initialize them
724 //
725 NextEntry = PoolDescriptor->ListHeads;
726 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
727 while (NextEntry < LastEntry)
728 {
729 ExpInitializePoolListHead(NextEntry);
730 NextEntry++;
731 }
732
733 //
734 // Note that ReactOS does not support Session Pool yet
735 //
736 ASSERT(PoolType != PagedPoolSession);
737 }
738
739 VOID
740 NTAPI
741 INIT_FUNCTION
742 InitializePool(IN POOL_TYPE PoolType,
743 IN ULONG Threshold)
744 {
745 PPOOL_DESCRIPTOR Descriptor;
746 SIZE_T TableSize;
747 ULONG i;
748
749 //
750 // Check what kind of pool this is
751 //
752 if (PoolType == NonPagedPool)
753 {
754 //
755 // Compute the track table size and convert it from a power of two to an
756 // actual byte size
757 //
758 // NOTE: On checked builds, we'll assert if the registry table size was
759 // invalid, while on retail builds we'll just break out of the loop at
760 // that point.
761 //
762 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
763 for (i = 0; i < 32; i++)
764 {
765 if (TableSize & 1)
766 {
767 ASSERT((TableSize & ~1) == 0);
768 if (!(TableSize & ~1)) break;
769 }
770 TableSize >>= 1;
771 }
772
773 //
774 // If we hit bit 32, then no size was defined in the registry, so
775 // we'll use the default size of 2048 entries.
776 //
777 // Otherwise, use the size from the registry, as long as it's not
778 // smaller than 64 entries.
779 //
780 if (i == 32)
781 {
782 PoolTrackTableSize = 2048;
783 }
784 else
785 {
786 PoolTrackTableSize = max(1 << i, 64);
787 }
788
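//
// As an illustration of the loop above (example values only): if TableSize
// came out of the min() as 256 (0x100), the low bit is first seen after 8
// shifts, so i == 8 and the table gets max(1 << 8, 64) == 256 entries. If no
// size was configured at all (TableSize == 0), the low bit is never set, the
// loop completes with i == 32, and the 2048-entry default is selected.
//
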
789 //
790 // Loop trying with the biggest specified size first, and cut it down
791 // by a power of two each iteration in case not enough memory exists
792 //
793 while (TRUE)
794 {
795 //
796 // Do not allow overflow
797 //
798 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
799 {
800 PoolTrackTableSize >>= 1;
801 continue;
802 }
803
804 //
805 // Allocate the tracker table and exit the loop if this worked
806 //
807 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
808 (PoolTrackTableSize + 1) *
809 sizeof(POOL_TRACKER_TABLE));
810 if (PoolTrackTable) break;
811
812 //
813 // Otherwise, as long as we're not down to the last bit, keep
814 // iterating
815 //
816 if (PoolTrackTableSize == 1)
817 {
818 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
819 TableSize,
820 0xFFFFFFFF,
821 0xFFFFFFFF,
822 0xFFFFFFFF);
823 }
824 PoolTrackTableSize >>= 1;
825 }
826
827 //
828 // Finally, add one entry, compute the hash, and zero the table
829 //
830 PoolTrackTableSize++;
831 PoolTrackTableMask = PoolTrackTableSize - 2;
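//
// At this point the table has a power-of-two number of regular buckets plus
// one extra entry at the very end. The mask is therefore size - 2 (i.e. the
// power of two minus one), so ExpComputeHashForTag() only ever produces
// indices into the regular buckets; the trailing entry is seemingly reserved
// as an overflow slot, which the insertion code above refuses to hand out.
//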
832
833 RtlZeroMemory(PoolTrackTable,
834 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
835
836 //
837 // We now do the exact same thing with the tracker table for big pages
838 //
839 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
840 for (i = 0; i < 32; i++)
841 {
842 if (TableSize & 1)
843 {
844 ASSERT((TableSize & ~1) == 0);
845 if (!(TableSize & ~1)) break;
846 }
847 TableSize >>= 1;
848 }
849
850 //
851 // For big pages, the default tracker table is 4096 entries, while the
852 // minimum is still 64
853 //
854 if (i == 32)
855 {
856 PoolBigPageTableSize = 4096;
857 }
858 else
859 {
860 PoolBigPageTableSize = max(1 << i, 64);
861 }
862
863 //
864 // Again, run the exact same loop we ran earlier, but this time for the
865 // big pool tracker instead
866 //
867 while (TRUE)
868 {
869 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
870 {
871 PoolBigPageTableSize >>= 1;
872 continue;
873 }
874
875 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
876 PoolBigPageTableSize *
877 sizeof(POOL_TRACKER_BIG_PAGES));
878 if (PoolBigPageTable) break;
879
880 if (PoolBigPageTableSize == 1)
881 {
882 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
883 TableSize,
884 0xFFFFFFFF,
885 0xFFFFFFFF,
886 0xFFFFFFFF);
887 }
888
889 PoolBigPageTableSize >>= 1;
890 }
891
892 //
893 // An extra entry is not needed for the big pool tracker, so just
894 // compute the hash and zero it
895 //
896 PoolBigPageTableHash = PoolBigPageTableSize - 1;
897 RtlZeroMemory(PoolBigPageTable,
898 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
899 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
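//
// Setting Va to 1 marks every entry as free: POOL_BIG_TABLE_ENTRY_FREE is
// bit 0 of the Va field, and bit 0 can never be set for a real (page-aligned)
// big-pool address. This is also why ExpFindAndRemoveTagBigPages() below can
// free an entry simply by incrementing Va.
//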
900
901 //
902 // During development, print this out so we can see what's happening
903 //
904 DPRINT1("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
905 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
906 DPRINT1("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
907 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
908
909 //
910 // Insert the generic tracker for all of big pool
911 //
912 ExpInsertPoolTracker('looP',
913 ROUND_TO_PAGES(PoolBigPageTableSize *
914 sizeof(POOL_TRACKER_BIG_PAGES)),
915 NonPagedPool);
916
917 //
918 // No support for NUMA systems at this time
919 //
920 ASSERT(KeNumberNodes == 1);
921
922 //
923 // Initialize the tag spinlock
924 //
925 KeInitializeSpinLock(&ExpTaggedPoolLock);
926
927 //
928 // Initialize the nonpaged pool descriptor
929 //
930 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
931 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
932 NonPagedPool,
933 0,
934 Threshold,
935 NULL);
936 }
937 else
938 {
939 //
940 // No support for NUMA systems at this time
941 //
942 ASSERT(KeNumberNodes == 1);
943
944 //
945 // Allocate the pool descriptor
946 //
947 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
948 sizeof(KGUARDED_MUTEX) +
949 sizeof(POOL_DESCRIPTOR),
950 'looP');
951 if (!Descriptor)
952 {
953 //
954 // This is really bad...
955 //
956 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
957 0,
958 -1,
959 -1,
960 -1);
961 }
962
963 //
964 // Setup the vector and guarded mutex for paged pool
965 //
966 PoolVector[PagedPool] = Descriptor;
967 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
968 ExpPagedPoolDescriptor[0] = Descriptor;
969 KeInitializeGuardedMutex(ExpPagedPoolMutex);
970 ExInitializePoolDescriptor(Descriptor,
971 PagedPool,
972 0,
973 Threshold,
974 ExpPagedPoolMutex);
975
976 //
977 // Insert the generic tracker for all of nonpaged pool
978 //
979 ExpInsertPoolTracker('looP',
980 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
981 NonPagedPool);
982 }
983 }
984
985 FORCEINLINE
986 KIRQL
987 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
988 {
989 //
990 // Check if this is nonpaged pool
991 //
992 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
993 {
994 //
995 // Use the queued spin lock
996 //
997 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
998 }
999 else
1000 {
1001 //
1002 // Use the guarded mutex
1003 //
1004 KeAcquireGuardedMutex(Descriptor->LockAddress);
1005 return APC_LEVEL;
1006 }
1007 }
1008
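//
// Note that for paged pool the guarded mutex does not actually raise the
// IRQL; ExLockPool() returns APC_LEVEL there only so that callers have a
// value to hand back to ExUnlockPool(), which ignores it in that case.
//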
1009 FORCEINLINE
1010 VOID
1011 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1012 IN KIRQL OldIrql)
1013 {
1014 //
1015 // Check if this is nonpaged pool
1016 //
1017 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1018 {
1019 //
1020 // Use the queued spin lock
1021 //
1022 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1023 }
1024 else
1025 {
1026 //
1027 // Use the guarded mutex
1028 //
1029 KeReleaseGuardedMutex(Descriptor->LockAddress);
1030 }
1031 }
1032
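//
// ExpGetPoolTagInfoTarget runs on every processor via KeGenericCallDpc. The
// first KeSignalCallDpcSynchronize() call acts as a barrier across all the
// processors and reports TRUE to exactly one of them, so the tracker table
// is copied exactly once while everyone else is held at DISPATCH_LEVEL,
// which is what makes the snapshot effectively atomic.
//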
1033 VOID
1034 NTAPI
1035 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1036 IN PVOID DeferredContext,
1037 IN PVOID SystemArgument1,
1038 IN PVOID SystemArgument2)
1039 {
1040 PPOOL_DPC_CONTEXT Context = DeferredContext;
1041 UNREFERENCED_PARAMETER(Dpc);
1042 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1043
1044 //
1045 // Make sure we win the race, and if we did, copy the data atomically
1046 //
1047 if (KeSignalCallDpcSynchronize(SystemArgument2))
1048 {
1049 RtlCopyMemory(Context->PoolTrackTable,
1050 PoolTrackTable,
1051 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1052
1053 //
1054 // This is here because ReactOS does not yet support expansion
1055 //
1056 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1057 }
1058
1059 //
1060 // Regardless of whether we won or not, we must now synchronize and then
1061 // decrement the barrier since this is one more processor that has completed
1062 // the callback.
1063 //
1064 KeSignalCallDpcSynchronize(SystemArgument2);
1065 KeSignalCallDpcDone(SystemArgument1);
1066 }
1067
1068 NTSTATUS
1069 NTAPI
1070 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1071 IN ULONG SystemInformationLength,
1072 IN OUT PULONG ReturnLength OPTIONAL)
1073 {
1074 SIZE_T TableSize, CurrentLength;
1075 ULONG EntryCount;
1076 NTSTATUS Status = STATUS_SUCCESS;
1077 PSYSTEM_POOLTAG TagEntry;
1078 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1079 POOL_DPC_CONTEXT Context;
1080 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1081
1082 //
1083 // Keep track of how much data the caller's buffer must hold
1084 //
1085 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1086
1087 //
1088 // Initialize the caller's buffer
1089 //
1090 TagEntry = &SystemInformation->TagInfo[0];
1091 SystemInformation->Count = 0;
1092
1093 //
1094 // Capture the number of entries, and the total size needed to make a copy
1095 // of the table
1096 //
1097 EntryCount = PoolTrackTableSize;
1098 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1099
1100 //
1101 // Allocate the "Generic DPC" temporary buffer
1102 //
1103 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1104 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1105
1106 //
1107 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1108 //
1109 Context.PoolTrackTable = Buffer;
1110 Context.PoolTrackTableSize = PoolTrackTableSize;
1111 Context.PoolTrackTableExpansion = NULL;
1112 Context.PoolTrackTableSizeExpansion = 0;
1113 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1114
1115 //
1116 // Now parse the results
1117 //
1118 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1119 {
1120 //
1121 // If the entry is empty, skip it
1122 //
1123 if (!TrackerEntry->Key) continue;
1124
1125 //
1126 // Otherwise, add one more entry to the caller's buffer, and ensure that
1127 // enough space has been allocated in it
1128 //
1129 SystemInformation->Count++;
1130 CurrentLength += sizeof(*TagEntry);
1131 if (SystemInformationLength < CurrentLength)
1132 {
1133 //
1134 // The caller's buffer is too small, so set a failure code. The
1135 // caller will know the count, as well as how much space is needed.
1136 //
1137 // We do NOT break out of the loop, because we want to keep incrementing
1138 // the Count as well as CurrentLength so that the caller can know the
1139 // final numbers
1140 //
1141 Status = STATUS_INFO_LENGTH_MISMATCH;
1142 }
1143 else
1144 {
1145 //
1146 // Small sanity check that our accounting is working correctly
1147 //
1148 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1149 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1150
1151 //
1152 // Return the data into the caller's buffer
1153 //
1154 TagEntry->TagUlong = TrackerEntry->Key;
1155 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1156 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1157 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1158 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1159 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1160 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1161 TagEntry++;
1162 }
1163 }
1164
1165 //
1166 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1167 //
1168 ExFreePool(Buffer);
1169 if (ReturnLength) *ReturnLength = CurrentLength;
1170 return Status;
1171 }
1172
1173 BOOLEAN
1174 NTAPI
1175 ExpAddTagForBigPages(IN PVOID Va,
1176 IN ULONG Key,
1177 IN ULONG NumberOfPages,
1178 IN POOL_TYPE PoolType)
1179 {
1180 ULONG Hash, i = 0;
1181 PVOID OldVa;
1182 KIRQL OldIrql;
1183 SIZE_T TableSize;
1184 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1185 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1186 ASSERT(!(PoolType & SESSION_POOL_MASK));
1187
1188 //
1189 // As the table is expandable, these values must only be read after acquiring
1190 // the lock to avoid a torn access during an expansion
1191 //
1192 Hash = ExpComputePartialHashForAddress(Va);
1193 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1194 Hash &= PoolBigPageTableHash;
1195 TableSize = PoolBigPageTableSize;
1196
1197 //
1198 // We loop from the current hash bucket to the end of the table, and then
1199 // rollover to hash bucket 0 and keep going from there. If we return back
1200 // to the beginning, then we attempt expansion at the bottom of the loop
1201 //
1202 EntryStart = Entry = &PoolBigPageTable[Hash];
1203 EntryEnd = &PoolBigPageTable[TableSize];
1204 do
1205 {
1206 //
1207 // Make sure that this is a free entry and attempt to atomically make the
1208 // entry busy now
1209 //
1210 OldVa = Entry->Va;
1211 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1212 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1213 {
1214 //
1215 // We now own this entry, write down the size and the pool tag
1216 //
1217 Entry->Key = Key;
1218 Entry->NumberOfPages = NumberOfPages;
1219
1220 //
1221 // Add one more entry to the count, and check whether we are now using
1222 // more than 25% of the table, at which point we would want to expand it
1223 // now to avoid blocking too hard later on.
1224 //
1225 // Note that we only do this if we have also had to probe at least 16
1226 // entries (kept losing the race or kept finding entries in use), which
1227 // implies a massive number of concurrent big pool allocations.
1228 //
1229 InterlockedIncrement(&ExpPoolBigEntriesInUse);
1230 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1231 {
1232 DPRINT1("Should attempt expansion since we now have %d entries\n",
1233 ExpPoolBigEntriesInUse);
1234 }
1235
1236 //
1237 // We have our entry, return
1238 //
1239 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1240 return TRUE;
1241 }
1242
1243 //
1244 // We don't have our entry yet, so keep trying, making the entry list
1245 // circular if we reach the last entry. We'll eventually break out of
1246 // the loop once we've rolled over and returned back to our original
1247 // hash bucket
1248 //
1249 i++;
1250 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1251 } while (Entry != EntryStart);
1252
1253 //
1254 // This means there's no free hash buckets whatsoever, so we would now have
1255 // to attempt expanding the table
1256 //
1257 DPRINT1("Big pool expansion needed, not implemented!");
1258 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1259 return FALSE;
1260 }
1261
1262 ULONG
1263 NTAPI
1264 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1265 OUT PULONG BigPages,
1266 IN POOL_TYPE PoolType)
1267 {
1268 BOOLEAN FirstTry = TRUE;
1269 SIZE_T TableSize;
1270 KIRQL OldIrql;
1271 ULONG PoolTag, Hash;
1272 PPOOL_TRACKER_BIG_PAGES Entry;
1273 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1274 ASSERT(!(PoolType & SESSION_POOL_MASK));
1275
1276 //
1277 // As the table is expandable, these values must only be read after acquiring
1278 // the lock to avoid a torn access during an expansion
1279 //
1280 Hash = ExpComputePartialHashForAddress(Va);
1281 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1282 Hash &= PoolBigPageTableHash;
1283 TableSize = PoolBigPageTableSize;
1284
1285 //
1286 // Loop while trying to find this big page allocation
1287 //
1288 while (PoolBigPageTable[Hash].Va != Va)
1289 {
1290 //
1291 // Increment the hash index until we go past the end of the table
1292 //
1293 if (++Hash >= TableSize)
1294 {
1295 //
1296 // Is this the second time we've tried?
1297 //
1298 if (!FirstTry)
1299 {
1300 //
1301 // This means it was never inserted into the pool table and it
1302 // received the special "BIG" tag -- return that and return 0
1303 // so that the code can ask Mm for the page count instead
1304 //
1305 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1306 *BigPages = 0;
1307 return ' GIB';
1308 }
1309
1310 //
1311 // The first time this happens, reset the hash index and try again
1312 //
1313 Hash = 0;
1314 FirstTry = FALSE;
1315 }
1316 }
1317
1318 //
1319 // Now capture all the information we need from the entry, since after we
1320 // release the lock, the data can change
1321 //
1322 Entry = &PoolBigPageTable[Hash];
1323 *BigPages = Entry->NumberOfPages;
1324 PoolTag = Entry->Key;
1325
1326 //
1327 // Set the free bit, and decrement the number of allocations. Finally, release
1328 // the lock and return the tag that was located
1329 //
1330 InterlockedIncrement((PLONG)&Entry->Va);
1331 InterlockedDecrement(&ExpPoolBigEntriesInUse);
1332 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1333 return PoolTag;
1334 }
1335
1336 VOID
1337 NTAPI
1338 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1339 OUT PULONG NonPagedPoolPages,
1340 OUT PULONG PagedPoolAllocs,
1341 OUT PULONG PagedPoolFrees,
1342 OUT PULONG PagedPoolLookasideHits,
1343 OUT PULONG NonPagedPoolAllocs,
1344 OUT PULONG NonPagedPoolFrees,
1345 OUT PULONG NonPagedPoolLookasideHits)
1346 {
1347 ULONG i;
1348 PPOOL_DESCRIPTOR PoolDesc;
1349
1350 //
1351 // Assume all failures
1352 //
1353 *PagedPoolPages = 0;
1354 *PagedPoolAllocs = 0;
1355 *PagedPoolFrees = 0;
1356
1357 //
1358 // Tally up the totals for all the paged pools
1359 //
1360 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1361 {
1362 PoolDesc = ExpPagedPoolDescriptor[i];
1363 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1364 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1365 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1366 }
1367
1368 //
1369 // The first non-paged pool has a hardcoded well-known descriptor name
1370 //
1371 PoolDesc = &NonPagedPoolDescriptor;
1372 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1373 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1374 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1375
1376 //
1377 // If the system has more than one non-paged pool, copy the other descriptor
1378 // totals as well
1379 //
1380 #if 0
1381 if (ExpNumberOfNonPagedPools > 1)
1382 {
1383 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1384 {
1385 PoolDesc = ExpNonPagedPoolDescriptor[i];
1386 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1387 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1388 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1389 }
1390 }
1391 #endif
1392
1393 //
1394 // FIXME: Not yet supported
1395 //
1396 *NonPagedPoolLookasideHits += 0;
1397 *PagedPoolLookasideHits += 0;
1398 }
1399
1400 /* PUBLIC FUNCTIONS ***********************************************************/
1401
1402 /*
1403 * @implemented
1404 */
1405 PVOID
1406 NTAPI
1407 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1408 IN SIZE_T NumberOfBytes,
1409 IN ULONG Tag)
1410 {
1411 PPOOL_DESCRIPTOR PoolDesc;
1412 PLIST_ENTRY ListHead;
1413 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1414 KIRQL OldIrql;
1415 USHORT BlockSize, i;
1416 ULONG OriginalType;
1417 PKPRCB Prcb = KeGetCurrentPrcb();
1418 PGENERAL_LOOKASIDE LookasideList;
1419
1420 //
1421 // Some sanity checks
1422 //
1423 ASSERT(Tag != 0);
1424 ASSERT(Tag != ' GIB');
1425 ASSERT(NumberOfBytes != 0);
1426 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1427
1428 //
1429 // Not supported in ReactOS
1430 //
1431 ASSERT(!(PoolType & SESSION_POOL_MASK));
1432
1433 //
1434 // Check if verifier or special pool is enabled
1435 //
1436 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1437 {
1438 //
1439 // For verifier, we should call the verification routine
1440 //
1441 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1442 {
1443 DPRINT1("Driver Verifier is not yet supported\n");
1444 }
1445
1446 //
1447 // For special pool, we check if this is a suitable allocation and do
1448 // the special allocation if needed
1449 //
1450 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1451 {
1452 //
1453 // Check if this is a special pool allocation
1454 //
1455 if (MmUseSpecialPool(NumberOfBytes, Tag))
1456 {
1457 //
1458 // Try to allocate using special pool
1459 //
1460 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1461 if (Entry) return Entry;
1462 }
1463 }
1464 }
1465
1466 //
1467 // Get the pool type and its corresponding vector for this request
1468 //
1469 OriginalType = PoolType;
1470 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1471 PoolDesc = PoolVector[PoolType];
1472 ASSERT(PoolDesc != NULL);
1473
1474 //
1475 // Check if this is a big page allocation
1476 //
1477 if (NumberOfBytes > POOL_MAX_ALLOC)
1478 {
1479 //
1480 // Allocate pages for it
1481 //
1482 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1483 if (!Entry)
1484 {
1485 //
1486 // Must succeed pool is deprecated, but still supported. These allocation
1487 // failures must cause an immediate bugcheck
1488 //
1489 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1490 {
1491 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1492 NumberOfBytes,
1493 NonPagedPoolDescriptor.TotalPages,
1494 NonPagedPoolDescriptor.TotalBigPages,
1495 0);
1496 }
1497
1498 //
1499 // Internal debugging
1500 //
1501 ExPoolFailures++;
1502
1503 //
1504 // This flag requests printing failures, and can also further specify
1505 // breaking on failures
1506 //
1507 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1508 {
1509 DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
1510 NumberOfBytes,
1511 OriginalType);
1512 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1513 }
1514
1515 //
1516 // Finally, this flag requests an exception, which we are more than
1517 // happy to raise!
1518 //
1519 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1520 {
1521 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1522 }
1523 }
1524
1525 //
1526 // Increment required counters
1527 //
1528 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages, BYTES_TO_PAGES(NumberOfBytes));
1529 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1530 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1531
1532 //
1533 // Add a tag for the big page allocation and switch to the generic "BIG"
1534 // tag if we failed to do so, then insert a tracker for this allocation.
1535 //
1536 if (!ExpAddTagForBigPages(Entry,
1537 Tag,
1538 BYTES_TO_PAGES(NumberOfBytes),
1539 OriginalType))
1540 {
1541 Tag = ' GIB';
1542 }
1543 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1544 return Entry;
1545 }
1546
1547 //
1548 // Should never request 0 bytes from the pool, but since so many drivers do
1549 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1550 //
1551 if (!NumberOfBytes) NumberOfBytes = 1;
1552
1553 //
1554 // A pool allocation is defined by its data, a linked list to connect it to
1555 // the free list (if necessary), and a pool header to store accounting info.
1556 // Calculate this size, then convert it into a block size (units of pool
1557 // headers)
1558 //
1559 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1560 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1561 // the direct allocation of pages.
1562 //
1563 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1564 / POOL_BLOCK_SIZE);
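//
// As a concrete example (assuming a 32-bit build where sizeof(POOL_HEADER)
// and POOL_BLOCK_SIZE are both 8 bytes): a 100-byte request yields
// i = (100 + 8 + 7) / 8 = 14 blocks, i.e. 112 bytes, enough for the header
// plus the caller's data, rounded up to the next block boundary.
//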
1565
1566 //
1567 // Handle lookaside list optimization for both paged and nonpaged pool
1568 //
1569 if (i <= MAXIMUM_PROCESSORS)
1570 {
1571 //
1572 // Try popping it from the per-CPU lookaside list
1573 //
1574 LookasideList = (PoolType == PagedPool) ?
1575 Prcb->PPPagedLookasideList[i - 1].P :
1576 Prcb->PPNPagedLookasideList[i - 1].P;
1577 LookasideList->TotalAllocates++;
1578 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1579 if (!Entry)
1580 {
1581 //
1582 // We failed, try popping it from the global list
1583 //
1584 LookasideList = (PoolType == PagedPool) ?
1585 Prcb->PPPagedLookasideList[i - 1].L :
1586 Prcb->PPNPagedLookasideList[i - 1].L;
1587 LookasideList->TotalAllocates++;
1588 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1589 }
1590
1591 //
1592 // If we were able to pop it, update the accounting and return the block
1593 //
1594 if (Entry)
1595 {
1596 LookasideList->AllocateHits++;
1597
1598 //
1599 // Get the real entry, write down its pool type, and track it
1600 //
1601 Entry--;
1602 Entry->PoolType = PoolType + 1;
1603 ExpInsertPoolTracker(Tag,
1604 Entry->BlockSize * POOL_BLOCK_SIZE,
1605 OriginalType);
1606
1607 //
1608 // Return the pool allocation
1609 //
1610 Entry->PoolTag = Tag;
1611 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1612 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1613 return POOL_FREE_BLOCK(Entry);
1614 }
1615 }
1616
1617 //
1618 // Loop in the free lists looking for a block of this size. Start with the
1619 // list optimized for this kind of size lookup
1620 //
1621 ListHead = &PoolDesc->ListHeads[i];
1622 do
1623 {
1624 //
1625 // Are there any free entries available on this list?
1626 //
1627 if (!ExpIsPoolListEmpty(ListHead))
1628 {
1629 //
1630 // Acquire the pool lock now
1631 //
1632 OldIrql = ExLockPool(PoolDesc);
1633
1634 //
1635 // And make sure the list still has entries
1636 //
1637 if (ExpIsPoolListEmpty(ListHead))
1638 {
1639 //
1640 // Someone raced us (and won) before we had a chance to acquire
1641 // the lock.
1642 //
1643 // Try again!
1644 //
1645 ExUnlockPool(PoolDesc, OldIrql);
1646 ListHead++;
1647 continue;
1648 }
1649
1650 //
1651 // Remove a free entry from the list
1652 // Note that due to the way we insert free blocks into multiple lists
1653 // there is a guarantee that any block on this list will either be
1654 // of the correct size, or perhaps larger.
1655 //
1656 ExpCheckPoolLinks(ListHead);
1657 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1658 ExpCheckPoolLinks(ListHead);
1659 ExpCheckPoolBlocks(Entry);
1660 ASSERT(Entry->BlockSize >= i);
1661 ASSERT(Entry->PoolType == 0);
1662
1663 //
1664 // Check if this block is larger than what we need. The block could
1665 // not possibly be smaller, due to the reason explained above (and
1666 // we would've asserted on a checked build if this was the case).
1667 //
1668 if (Entry->BlockSize != i)
1669 {
1670 //
1671 // Is there an entry before this one?
1672 //
1673 if (Entry->PreviousSize == 0)
1674 {
1675 //
1676 // There isn't anyone before us, so take the next block and
1677 // turn it into a fragment that contains the leftover data
1678 // that we don't need to satisfy the caller's request
1679 //
1680 FragmentEntry = POOL_BLOCK(Entry, i);
1681 FragmentEntry->BlockSize = Entry->BlockSize - i;
1682
1683 //
1684 // And make it point back to us
1685 //
1686 FragmentEntry->PreviousSize = i;
1687
1688 //
1689 // Now get the block that follows the new fragment and check
1690 // if it's still on the same page as us (and not at the end)
1691 //
1692 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
1693 if (PAGE_ALIGN(NextEntry) != NextEntry)
1694 {
1695 //
1696 // Adjust this next block to point to our newly created
1697 // fragment block
1698 //
1699 NextEntry->PreviousSize = FragmentEntry->BlockSize;
1700 }
1701 }
1702 else
1703 {
1704 //
1705 // There is a free entry before us, which we know is smaller
1706 // so we'll make this entry the fragment instead
1707 //
1708 FragmentEntry = Entry;
1709
1710 //
1711 // And then we'll remove from it the actual size required.
1712 // Now the entry is a leftover free fragment
1713 //
1714 Entry->BlockSize -= i;
1715
1716 //
1717 // Now let's go to the next entry after the fragment (which
1718 // used to point to our original free entry) and make it
1719 // reference the new fragment entry instead.
1720 //
1721 // This is the entry that will actually end up holding the
1722 // allocation!
1723 //
1724 Entry = POOL_NEXT_BLOCK(Entry);
1725 Entry->PreviousSize = FragmentEntry->BlockSize;
1726
1727 //
1728 // And now let's go to the entry after that one and check if
1729 // it's still on the same page, and not at the end
1730 //
1731 NextEntry = POOL_BLOCK(Entry, i);
1732 if (PAGE_ALIGN(NextEntry) != NextEntry)
1733 {
1734 //
1735 // Make it reference the allocation entry
1736 //
1737 NextEntry->PreviousSize = i;
1738 }
1739 }
1740
1741 //
1742 // Now our (allocation) entry is the right size
1743 //
1744 Entry->BlockSize = i;
1745
1746 //
1747 // And the next entry is now the free fragment which contains
1748 // the remaining difference between how big the original entry
1749 // was, and the actual size the caller needs/requested.
1750 //
1751 FragmentEntry->PoolType = 0;
1752 BlockSize = FragmentEntry->BlockSize;
1753
1754 //
1755 // Now check if enough free bytes remained for us to have a
1756 // "full" entry, which contains enough bytes for a linked list
1757 // and thus can be used for allocations (up to 8 bytes...)
1758 //
1759 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
1760 if (BlockSize != 1)
1761 {
1762 //
1763 // Insert the free entry into the free list for this size
1764 //
1765 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
1766 POOL_FREE_BLOCK(FragmentEntry));
1767 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
1768 }
1769 }
1770
1771 //
1772 // We have found an entry for this allocation, so set the pool type
1773 // and release the lock since we're done
1774 //
1775 Entry->PoolType = PoolType + 1;
1776 ExpCheckPoolBlocks(Entry);
1777 ExUnlockPool(PoolDesc, OldIrql);
1778
1779 //
1780 // Increment required counters
1781 //
1782 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1783 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1784
1785 //
1786 // Track this allocation
1787 //
1788 ExpInsertPoolTracker(Tag,
1789 Entry->BlockSize * POOL_BLOCK_SIZE,
1790 OriginalType);
1791
1792 //
1793 // Return the pool allocation
1794 //
1795 Entry->PoolTag = Tag;
1796 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1797 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1798 return POOL_FREE_BLOCK(Entry);
1799 }
1800 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
1801
1802 //
1803 // There were no free entries left, so we have to allocate a new fresh page
1804 //
1805 Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
1806 if (!Entry)
1807 {
1808 //
1809 // Must succeed pool is deprecated, but still supported. These allocation
1810 // failures must cause an immediate bugcheck
1811 //
1812 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1813 {
1814 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1815 PAGE_SIZE,
1816 NonPagedPoolDescriptor.TotalPages,
1817 NonPagedPoolDescriptor.TotalBigPages,
1818 0);
1819 }
1820
1821 //
1822 // Internal debugging
1823 //
1824 ExPoolFailures++;
1825
1826 //
1827 // This flag requests printing failures, and can also further specify
1828 // breaking on failures
1829 //
1830 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1831 {
1832 DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
1833 NumberOfBytes,
1834 OriginalType);
1835 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1836 }
1837
1838 //
1839 // Finally, this flag requests an exception, which we are more than
1840 // happy to raise!
1841 //
1842 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1843 {
1844 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1845 }
1846
1847 //
1848 // Return NULL to the caller in all other cases
1849 //
1850 return NULL;
1851 }
1852
1853 //
1854 // Setup the entry data
1855 //
1856 Entry->Ulong1 = 0;
1857 Entry->BlockSize = i;
1858 Entry->PoolType = PoolType + 1;
1859
1860 //
1861 // This page will have two entries -- one for the allocation (which we just
1862 // created above), and one for the remaining free bytes, which we're about
1863 // to create now. The free bytes are the whole page minus what was allocated
1864 // and then converted into units of block headers.
1865 //
1866 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
1867 FragmentEntry = POOL_BLOCK(Entry, i);
1868 FragmentEntry->Ulong1 = 0;
1869 FragmentEntry->BlockSize = BlockSize;
1870 FragmentEntry->PreviousSize = i;
1871
1872 //
1873 // Increment required counters
1874 //
1875 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
1876 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1877
1878 //
1879 // Now check if enough free bytes remained for us to have a "full" entry,
1880 // which contains enough bytes for a linked list and thus can be used for
1881 // allocations (up to 8 bytes...)
1882 //
1883 if (FragmentEntry->BlockSize != 1)
1884 {
1885 //
1886 // Excellent -- acquire the pool lock
1887 //
1888 OldIrql = ExLockPool(PoolDesc);
1889
1890 //
1891 // And insert the free entry into the free list for this block size
1892 //
1893 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
1894 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
1895 POOL_FREE_BLOCK(FragmentEntry));
1896 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
1897
1898 //
1899 // Release the pool lock
1900 //
1901 ExpCheckPoolBlocks(Entry);
1902 ExUnlockPool(PoolDesc, OldIrql);
1903 }
1904 else
1905 {
1906 //
1907 // Simply do a sanity check
1908 //
1909 ExpCheckPoolBlocks(Entry);
1910 }
1911
1912 //
1913 // Increment performance counters and track this allocation
1914 //
1915 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1916 ExpInsertPoolTracker(Tag,
1917 Entry->BlockSize * POOL_BLOCK_SIZE,
1918 PoolType);
1919
1920 //
1921 // And return the pool allocation
1922 //
1923 ExpCheckPoolBlocks(Entry);
1924 Entry->PoolTag = Tag;
1925 return POOL_FREE_BLOCK(Entry);
1926 }
1927
1928 /*
1929 * @implemented
1930 */
1931 PVOID
1932 NTAPI
1933 ExAllocatePool(POOL_TYPE PoolType,
1934 SIZE_T NumberOfBytes)
1935 {
1936 //
1937 // Use a default tag of "None"
1938 //
1939 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, TAG_NONE);
1940 }
1941
1942 /*
1943 * @implemented
1944 */
1945 VOID
1946 NTAPI
1947 ExFreePoolWithTag(IN PVOID P,
1948 IN ULONG TagToFree)
1949 {
1950 PPOOL_HEADER Entry, NextEntry;
1951 USHORT BlockSize;
1952 KIRQL OldIrql;
1953 POOL_TYPE PoolType;
1954 PPOOL_DESCRIPTOR PoolDesc;
1955 ULONG Tag;
1956 BOOLEAN Combined = FALSE;
1957 PFN_NUMBER PageCount, RealPageCount;
1958 PKPRCB Prcb = KeGetCurrentPrcb();
1959 PGENERAL_LOOKASIDE LookasideList;
1960
1961 //
1962 // Check if any of the debug flags are enabled
1963 //
1964 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
1965 POOL_FLAG_CHECK_WORKERS |
1966 POOL_FLAG_CHECK_RESOURCES |
1967 POOL_FLAG_VERIFIER |
1968 POOL_FLAG_CHECK_DEADLOCK |
1969 POOL_FLAG_SPECIAL_POOL))
1970 {
1971 //
1972 // Check if special pool is enabled
1973 //
1974 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1975 {
1976 //
1977 // Check if it was allocated from a special pool
1978 //
1979 if (MmIsSpecialPoolAddress(P))
1980 {
1981 //
1982 // Was deadlock verification also enabled? We can do some extra
1983 // checks at this point
1984 //
1985 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
1986 {
1987 DPRINT1("Verifier not yet supported\n");
1988 }
1989
1990 //
1991 // It is, so handle it via special pool free routine
1992 //
1993 MmFreeSpecialPool(P);
1994 return;
1995 }
1996 }
1997
1998 //
1999 // For non-big page allocations, we'll do a bunch of checks in here
2000 //
2001 if (PAGE_ALIGN(P) != P)
2002 {
2003 //
2004 // Get the entry for this pool allocation
2005 // The pointer math here may look wrong or confusing, but it is quite right
2006 //
2007 Entry = P;
2008 Entry--;
2009
2010 //
2011 // Get the pool type
2012 //
2013 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2014
2015 //
2016 // FIXME: Many other debugging checks go here
2017 //
2018 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2019 }
2020 }
2021
2022 //
2023 // Check if this is a big page allocation
2024 //
2025 if (PAGE_ALIGN(P) == P)
2026 {
2027 //
2028 // We need to find the tag for it, so first we need to find out what
2029 // kind of allocation this was (paged or nonpaged), then we can go
2030 // ahead and try finding the tag for it. Remember to get rid of the
2031 // PROTECTED_POOL tag if it's found.
2032 //
2033 // Note that if at insertion time, we failed to add the tag for a big
2034 // pool allocation, we used a special tag called 'BIG' to identify the
2035 // allocation, and we may get this tag back. In this scenario, we must
2036 // manually get the size of the allocation by actually counting through
2037 // the PFN database.
2038 //
2039 PoolType = MmDeterminePoolType(P);
2040 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2041 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2042 if (!Tag)
2043 {
2044 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2045 ASSERT(Tag == ' GIB');
2046 PageCount = 1; // We are going to lie! This might screw up accounting?
2047 }
2048 else if (Tag & PROTECTED_POOL)
2049 {
2050 Tag &= ~PROTECTED_POOL;
2051 }
2052
2053 //
2054 // We have our tag and our page count, so we can go ahead and remove this
2055 // tracker now
2056 //
2057 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2058
2059 //
2060 // Check if any of the debug flags are enabled
2061 //
2062 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2063 POOL_FLAG_CHECK_WORKERS |
2064 POOL_FLAG_CHECK_RESOURCES |
2065 POOL_FLAG_CHECK_DEADLOCK))
2066 {
2067 //
2068 // Was deadlock verification also enabled? We can do some extra
2069 // checks at this point
2070 //
2071 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2072 {
2073 DPRINT1("Verifier not yet supported\n");
2074 }
2075
2076 //
2077 // FIXME: Many debugging checks go here
2078 //
2079 }
2080
2081 //
2082 // Update counters
2083 //
2084 PoolDesc = PoolVector[PoolType];
2085 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2086 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -(PageCount << PAGE_SHIFT));
2087
2088 //
2089 // Do the real free now and update the last counter with the big page count
2090 //
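// (MiFreePoolPages returns the number of pages it actually released; the
// ASSERT below verifies this matches the page count recorded by the tag
// tracker. If the tag lookup failed above and PageCount was forced to 1,
// a larger allocation would trip that assertion on debug builds.)
//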
2091 RealPageCount = MiFreePoolPages(P);
2092 ASSERT(RealPageCount == PageCount);
2093 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages, -(LONG)RealPageCount);
2094 return;
2095 }
2096
2097 //
2098 // Get the entry for this pool allocation
2099 // The pointer math here may look wrong or confusing, but it is quite right
2100 //
2101 Entry = P;
2102 Entry--;
2103
2104 //
2105 // Get the size of the entry and its pool type, then load the descriptor
2106 // for this pool type
2107 //
2108 BlockSize = Entry->BlockSize;
2109 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2110 PoolDesc = PoolVector[PoolType];
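//
// (In-use pool headers store the pool type biased by one, so that a value
// of zero can mean "free block"; subtracting 1 and masking with
// BASE_POOL_TYPE_MASK yields NonPagedPool (0) or PagedPool (1). BlockSize
// is expressed in POOL_BLOCK_SIZE units: assuming the usual x86 layout of
// 8-byte units and an 8-byte header, a 24-byte request occupies 4 units,
// 1 for the header plus 3 for the data.)
//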
2111
2112 //
2113 // Make sure that the IRQL makes sense
2114 //
2115 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2116
2117 //
2118 // Get the pool tag and get rid of the PROTECTED_POOL flag
2119 //
2120 Tag = Entry->PoolTag;
2121 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2122
2123 //
2124 // Stop tracking this allocation
2125 //
2126 ExpRemovePoolTracker(Tag,
2127 BlockSize * POOL_BLOCK_SIZE,
2128 Entry->PoolType - 1);
2129
2130 //
2131 // Is this allocation small enough to have come from a lookaside list?
2132 //
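// (Small blocks are cached on per-processor lookaside lists so they can be
// freed without taking the pool spinlock. MAXIMUM_PROCESSORS is apparently
// used here as a stand-in for the number of per-size lookaside lists, i.e.
// the largest block size, in POOL_BLOCK_SIZE units, that the lists cover.)
//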
2133 if (BlockSize <= MAXIMUM_PROCESSORS)
2134 {
2135 //
2136 // Try pushing it into the per-CPU lookaside list
2137 //
2138 LookasideList = (PoolType == PagedPool) ?
2139 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2140 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2141 LookasideList->TotalFrees++;
2142 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2143 {
2144 LookasideList->FreeHits++;
2145 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2146 return;
2147 }
2148
2149 //
2150 // We failed, try to push it into the global lookaside list
2151 //
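// (The .P member is presumably the per-processor list and .L the shared,
// system-wide list; both are reached through the PRCB, so no extra lookup
// is needed on this path.)
//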
2152 LookasideList = (PoolType == PagedPool) ?
2153 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2154 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2155 LookasideList->TotalFrees++;
2156 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2157 {
2158 LookasideList->FreeHits++;
2159 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2160 return;
2161 }
2162 }
2163
2164 //
2165 // Get the pointer to the next entry
2166 //
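// (That is, the header BlockSize units past our own header: the block that
// immediately follows this allocation on the same page.)
//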
2167 NextEntry = POOL_BLOCK(Entry, BlockSize);
2168
2169 //
2170 // Update performance counters
2171 //
2172 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2173 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -(BlockSize * POOL_BLOCK_SIZE));
2174
2175 //
2176 // Acquire the pool lock
2177 //
2178 OldIrql = ExLockPool(PoolDesc);
2179
2180 //
2181 // Check block tag
2182 //
2183 if (TagToFree && TagToFree != Entry->PoolTag)
2184 {
2185 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Entry->PoolTag);
2186 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, TagToFree);
2187 }
2188
2189 //
2190 // Check if the next allocation is at the end of the page
2191 //
2192 ExpCheckPoolBlocks(Entry);
2193 if (PAGE_ALIGN(NextEntry) != NextEntry)
2194 {
2195 //
2196 // We may be able to combine the block if it's free
2197 //
2198 if (NextEntry->PoolType == 0)
2199 {
2200 //
2201 // The next block is free, so we'll do a combine
2202 //
2203 Combined = TRUE;
2204
2205 //
2206 // Make sure there's actual data in the block -- anything smaller
2207 // than this means we only have the header, so there's no linked list
2208 // for us to remove
2209 //
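// (A free block is linked into the free lists through a LIST_ENTRY stored
// right after its header, so it needs at least one data unit; a one-unit
// block is header-only and is never on any list.)
//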
2210 if (NextEntry->BlockSize != 1)
2211 {
2212 //
2213 // The block is at least big enough to have a linked list, so go
2214 // ahead and remove it
2215 //
2216 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2217 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2218 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2219 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2220 }
2221
2222 //
2223 // Our entry is now combined with the next entry
2224 //
2225 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2226 }
2227 }
2228
2229 //
2230 // Now check if there was a previous entry on the same page as us
2231 //
2232 if (Entry->PreviousSize)
2233 {
2234 //
2235 // Great, grab that entry and check if it's free
2236 //
2237 NextEntry = POOL_PREV_BLOCK(Entry);
2238 if (NextEntry->PoolType == 0)
2239 {
2240 //
2241 // It is, so we can do a combine
2242 //
2243 Combined = TRUE;
2244
2245 //
2246 // Make sure there's actual data in the block -- anything smaller
2247 // than this means we only have the header so there's no linked list
2248 // for us to remove
2249 //
2250 if (NextEntry->BlockSize != 1)
2251 {
2252 //
2253 // The block is at least big enough to have a linked list, so go
2254 // ahead and remove it
2255 //
2256 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2257 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2258 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2259 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2260 }
2261
2262 //
2263 // Combine our original block (which might've already been combined
2264 // with the next block), into the previous block
2265 //
2266 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2267
2268 //
2269 // And now we'll work with the previous block instead
2270 //
2271 Entry = NextEntry;
2272 }
2273 }
2274
2275 //
2276 // By now, our combined blocks may add up to an entire page (if the page
2277 // held only two or three allocations, they could all have been merged
2278 // into this one).
2279 //
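// (The test below catches exactly that case: the block starts on a page
// boundary and its next block would start on the following page boundary,
// so the free block now spans the whole page and the page itself can be
// handed back to the pool page allocator.)
//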
2280 if ((PAGE_ALIGN(Entry) == Entry) &&
2281 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2282 {
2283 //
2284 // In this case, release the pool lock, update the performance counter,
2285 // and free the page
2286 //
2287 ExUnlockPool(PoolDesc, OldIrql);
2288 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2289 MiFreePoolPages(Entry);
2290 return;
2291 }
2292
2293 //
2294 // Otherwise, we now have a free block (or a combination of 2 or 3)
2295 //
2296 Entry->PoolType = 0;
2297 BlockSize = Entry->BlockSize;
2298 ASSERT(BlockSize != 1);
2299
2300 //
2301 // Check if we actually did combine it with anyone
2302 //
2303 if (Combined)
2304 {
2305 //
2306 // Get the first combined block (either our original to begin with, or
2307 // the one after the original, depending on whether we combined with the previous)
2308 //
2309 NextEntry = POOL_NEXT_BLOCK(Entry);
2310
2311 //
2312 // As long as the next block isn't on a page boundary, have it point
2313 // back to us
2314 //
2315 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
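//
// (Keeping PreviousSize in sync is what lets POOL_PREV_BLOCK and the
// backward-coalescing path above walk to this block correctly on a
// later free.)
//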
2316 }
2317
2318 //
2319 // Insert this new free block, and release the pool lock
2320 //
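// (Free lists are indexed by block size in POOL_BLOCK_SIZE units, so a
// block of N units goes on ListHeads[N - 1].)
//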
2321 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2322 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2323 ExUnlockPool(PoolDesc, OldIrql);
2324 }
2325
2326 /*
2327 * @implemented
2328 */
2329 VOID
2330 NTAPI
2331 ExFreePool(PVOID P)
2332 {
2333 //
2334 // Just free without checking for the tag
2335 //
2336 ExFreePoolWithTag(P, 0);
2337 }
2338
2339 /*
2340 * @unimplemented
2341 */
2342 SIZE_T
2343 NTAPI
2344 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2345 OUT PBOOLEAN QuotaCharged)
2346 {
2347 //
2348 // Not implemented
2349 //
2350 UNIMPLEMENTED;
2351 return 0;
2352 }
2353
2354 /*
2355 * @implemented
2356 */
2357
2358 PVOID
2359 NTAPI
2360 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2361 IN SIZE_T NumberOfBytes)
2362 {
2363 //
2364 // Allocate the pool
2365 //
2366 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2367 }
2368
2369 /*
2370 * @implemented
2371 */
2372 PVOID
2373 NTAPI
2374 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2375 IN SIZE_T NumberOfBytes,
2376 IN ULONG Tag,
2377 IN EX_POOL_PRIORITY Priority)
2378 {
2379 //
2380 // Priority is not honored yet; fall back to a plain tagged allocation
2381 //
2382 UNIMPLEMENTED;
2383 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2384 }
2385
2386 /*
2387 * @implemented
2388 */
2389 PVOID
2390 NTAPI
2391 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2392 IN SIZE_T NumberOfBytes,
2393 IN ULONG Tag)
2394 {
2395 //
2396 // Quota charging is not implemented yet; fall back to a plain tagged allocation
2397 //
2398 UNIMPLEMENTED;
2399 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2400 }
2401
2402 /* EOF */