1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "../ARM3/miarm.h"
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 ULONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
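//
// A quick sketch of the layout these macros assume (using the usual 32-bit
// sizes, where sizeof(POOL_HEADER) and POOL_BLOCK_SIZE are both 8 bytes; on
// 64-bit builds both are 16): the header immediately precedes the pointer
// handed to the caller, so POOL_ENTRY(P) is simply P minus one header, and
// BlockSize/PreviousSize are counted in POOL_BLOCK_SIZE units, so a 4-block
// entry spans 32 bytes and POOL_NEXT_BLOCK(Entry) lands 32 bytes further on.
//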
56
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
86
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210 Entry->PreviousSize, (char *)&Entry->PoolTag);
211 KeBugCheckEx(BAD_POOL_HEADER,
212 5,
213 (ULONG_PTR)PreviousEntry,
214 __LINE__,
215 (ULONG_PTR)Entry);
216 }
217 }
218 else if (PAGE_ALIGN(Entry) != Entry)
219 {
220 /* If there's no block before us, we are the first block, so we should be on a page boundary */
221 KeBugCheckEx(BAD_POOL_HEADER,
222 7,
223 0,
224 __LINE__,
225 (ULONG_PTR)Entry);
226 }
227
228 /* This block must have a size */
229 if (!Entry->BlockSize)
230 {
231 /* Someone must've corrupted this field */
232 if (Entry->PreviousSize)
233 {
234 PreviousEntry = POOL_PREV_BLOCK(Entry);
235 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236 (char *)&PreviousEntry->PoolTag,
237 (char *)&Entry->PoolTag);
238 }
239 else
240 {
241 DPRINT1("Entry tag %.4s\n",
242 (char *)&Entry->PoolTag);
243 }
244 KeBugCheckEx(BAD_POOL_HEADER,
245 8,
246 0,
247 __LINE__,
248 (ULONG_PTR)Entry);
249 }
250
251 /* Okay, now get the next block */
252 NextEntry = POOL_NEXT_BLOCK(Entry);
253
254 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255 if (PAGE_ALIGN(NextEntry) != NextEntry)
256 {
257 /* The two blocks must be on the same page! */
258 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259 {
260 /* Something is messed up */
261 KeBugCheckEx(BAD_POOL_HEADER,
262 9,
263 (ULONG_PTR)NextEntry,
264 __LINE__,
265 (ULONG_PTR)Entry);
266 }
267
268 /* And this block should think we are as large as we truly are */
269 if (NextEntry->PreviousSize != Entry->BlockSize)
270 {
271 /* Otherwise, someone corrupted the field */
272 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273 Entry->BlockSize, (char *)&Entry->PoolTag,
274 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275 KeBugCheckEx(BAD_POOL_HEADER,
276 5,
277 (ULONG_PTR)NextEntry,
278 __LINE__,
279 (ULONG_PTR)Entry);
280 }
281 }
282 }
283
284 VOID
285 NTAPI
286 ExpCheckPoolBlocks(IN PVOID Block)
287 {
288 BOOLEAN FoundBlock = FALSE;
289 SIZE_T Size = 0;
290 PPOOL_HEADER Entry;
291
292 /* Get the first entry for this page, make sure it really is the first */
293 Entry = PAGE_ALIGN(Block);
294 ASSERT(Entry->PreviousSize == 0);
295
296 /* Now scan each entry */
297 while (TRUE)
298 {
299 /* When we actually found our block, remember this */
300 if (Entry == Block) FoundBlock = TRUE;
301
302 /* Now validate this block header */
303 ExpCheckPoolHeader(Entry);
304
305 /* And go to the next one, keeping track of our size */
306 Size += Entry->BlockSize;
307 Entry = POOL_NEXT_BLOCK(Entry);
308
309 /* If we hit the last block, stop */
310 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
311
312 /* If we hit the end of the page, stop */
313 if (PAGE_ALIGN(Entry) == Entry) break;
314 }
315
316 /* We must've found our block, and we must have hit the end of the page */
317 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
318 {
319 /* Otherwise, the blocks are messed up */
320 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
321 }
322 }
323
324 FORCEINLINE
325 VOID
326 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
327 IN SIZE_T NumberOfBytes,
328 IN PVOID Entry)
329 {
330 //
331 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
332 // be DISPATCH_LEVEL or lower for Non Paged Pool
333 //
334 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
335 (KeGetCurrentIrql() > APC_LEVEL) :
336 (KeGetCurrentIrql() > DISPATCH_LEVEL))
337 {
338 //
339 // Take the system down
340 //
341 KeBugCheckEx(BAD_POOL_CALLER,
342 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
343 KeGetCurrentIrql(),
344 PoolType,
345 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
346 }
347 }
348
349 FORCEINLINE
350 ULONG
351 ExpComputeHashForTag(IN ULONG Tag,
352 IN SIZE_T BucketMask)
353 {
354 //
355 // Compute the hash by multiplying with a large prime number and then XORing
356 // with the HIDWORD of the result.
357 //
358 // Finally, AND with the bucket mask to generate a valid index/bucket into
359 // the table
360 //
361 ULONGLONG Result = (ULONGLONG)40543 * Tag;
362 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
363 }
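//
// For example, with the default tracker table that InitializePool builds
// below (2048 entries plus one spare, PoolTrackTableMask == 2047), every tag
// hashes to a bucket in the range 0..2047; the callers then resolve any
// collision by linear probing, i.e. Hash = (Hash + 1) & PoolTrackTableMask.
//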
364
365 FORCEINLINE
366 ULONG
367 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
368 {
369 ULONG Result;
370 //
371 // Compute the hash by converting the address into a page number, and then
372 // XORing the bytes of that number together.
373 //
374 // We do *NOT* AND with the bucket mask at this point because big table expansion
375 // might happen. Therefore, the final step of the hash must be performed
376 // while holding the expansion pushlock, and this is why we call this a
377 // "partial" hash only.
378 //
379 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
380 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
381 }
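//
// Worked example with a hypothetical address, assuming PAGE_SHIFT == 12 and
// the default 4096-entry big-page table: Va == 0x80501000 gives the page
// number 0x80501, the byte folding above yields 0x80501 ^ 0x805 ^ 0x8 ==
// 0x80D0C, and masking with PoolBigPageTableHash (0xFFF) under the expansion
// lock finally selects bucket 0xD0C.
//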
382
383 /* PRIVATE FUNCTIONS **********************************************************/
384
385 VOID
386 NTAPI
387 INIT_FUNCTION
388 ExpSeedHotTags(VOID)
389 {
390 ULONG i, Key, Hash, Index;
391 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
392 ULONG TagList[] =
393 {
394 '  oI',
395 ' laH',
396 'PldM',
397 'LooP',
398 'tSbO',
399 ' prI',
400 'bdDN',
401 'LprI',
402 'pOoI',
403 ' ldM',
404 'eliF',
405 'aVMC',
406 'dSeS',
407 'CFtN',
408 'looP',
409 'rPCT',
410 'bNMC',
411 'dTeS',
412 'sFtN',
413 'TPCT',
414 'CPCT',
415 ' yeK',
416 'qSbO',
417 'mNoI',
418 'aEoI',
419 'cPCT',
420 'aFtN',
421 '0ftN',
422 'tceS',
423 'SprI',
424 'ekoT',
425 '  eS',
426 'lCbO',
427 'cScC',
428 'lFtN',
429 'cAeS',
430 'mfSF',
431 'kWcC',
432 'miSF',
433 'CdfA',
434 'EdfA',
435 'orSF',
436 'nftN',
437 'PRIU',
438 'rFpN',
439 'RFpN',
440 'aPeS',
441 'sUeS',
442 'FpcA',
443 'MpcA',
444 'cSeS',
445 'mNbO',
446 'sFpN',
447 'uLeS',
448 'DPcS',
449 'nevE',
450 'vrqR',
451 'ldaV',
452 '  pP',
453 'SdaV',
454 ' daV',
455 'LdaV',
456 'FdaV',
457 ' GIB',
458 };
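//
// (The constants above read backwards because a pool tag is stored as a
// little-endian ULONG: '  oI' is the tag "Io  ", ' prI' is "Irp ", 'eliF'
// is "File", and so on -- the same spelling that poolmon or the debugger
// would display.)
//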
459
460 //
461 // Loop all 64 hot tags
462 //
463 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
464 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
465 {
466 //
467 // Get the current tag, and compute its hash in the tracker table
468 //
469 Key = TagList[i];
470 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
471
472 //
473 // Loop all the hashes in this index/bucket
474 //
475 Index = Hash;
476 while (TRUE)
477 {
478 //
479 // Find an empty entry, and make sure this isn't the last hash that
480 // can fit.
481 //
482 // On checked builds, also make sure this is the first time we are
483 // seeding this tag.
484 //
485 ASSERT(TrackTable[Hash].Key != Key);
486 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
487 {
488 //
489 // It has been seeded, move on to the next tag
490 //
491 TrackTable[Hash].Key = Key;
492 break;
493 }
494
495 //
496 // This entry was already taken, compute the next possible hash while
497 // making sure we're not back at our initial index.
498 //
499 ASSERT(TrackTable[Hash].Key != Key);
500 Hash = (Hash + 1) & PoolTrackTableMask;
501 if (Hash == Index) break;
502 }
503 }
504 }
505
506 VOID
507 NTAPI
508 ExpRemovePoolTracker(IN ULONG Key,
509 IN SIZE_T NumberOfBytes,
510 IN POOL_TYPE PoolType)
511 {
512 ULONG Hash, Index;
513 PPOOL_TRACKER_TABLE Table, TableEntry;
514 SIZE_T TableMask, TableSize;
515
516 //
517 // Remove the PROTECTED_POOL flag which is not part of the tag
518 //
519 Key &= ~PROTECTED_POOL;
520
521 //
522 // With WinDBG you can set a tag you want to break on when an allocation is
523 // attempted
524 //
525 if (Key == PoolHitTag) DbgBreakPoint();
526
527 //
528 // Why the double indirection? Because normally this function is also used
529 // when doing session pool allocations, which use another set of tables,
530 // sizes, and masks that live in session pool. We don't support session
531 // pool yet, so we only ever use the regular tables, but I'm keeping the code
532 // this way so that the day we DO support session pool, it won't require
533 // that many changes
534 //
535 Table = PoolTrackTable;
536 TableMask = PoolTrackTableMask;
537 TableSize = PoolTrackTableSize;
538
539 //
540 // Compute the hash for this key, and loop all the possible buckets
541 //
542 Hash = ExpComputeHashForTag(Key, TableMask);
543 Index = Hash;
544 while (TRUE)
545 {
546 //
547 // Have we found the entry for this tag?
548 //
549 TableEntry = &Table[Hash];
550 if (TableEntry->Key == Key)
551 {
552 //
553 // Decrement the counters depending on if this was paged or nonpaged
554 // pool
555 //
556 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
557 {
558 InterlockedIncrement(&TableEntry->NonPagedFrees);
559 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
560 -(SSIZE_T)NumberOfBytes);
561 return;
562 }
563 InterlockedIncrement(&TableEntry->PagedFrees);
564 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
565 -(SSIZE_T)NumberOfBytes);
566 return;
567 }
568
569 //
570 // We should have only ended up with an empty entry if we've reached
571 // the last bucket
572 //
573 if (!TableEntry->Key) ASSERT(Hash == TableMask);
574
575 //
576 // This path is hit when we don't have an entry, and the current bucket
577 // is full, so we simply try the next one
578 //
579 Hash = (Hash + 1) & TableMask;
580 if (Hash == Index) break;
581 }
582
583 //
584 // And finally this path is hit when all the buckets are full, and we need
585 // some expansion. This path is not yet supported in ReactOS and so we'll
586 // ignore the tag
587 //
588 DPRINT1("Out of pool tag space, ignoring...\n");
589 }
590
591 VOID
592 NTAPI
593 ExpInsertPoolTracker(IN ULONG Key,
594 IN SIZE_T NumberOfBytes,
595 IN POOL_TYPE PoolType)
596 {
597 ULONG Hash, Index;
598 KIRQL OldIrql;
599 PPOOL_TRACKER_TABLE Table, TableEntry;
600 SIZE_T TableMask, TableSize;
601
602 //
603 // Remove the PROTECTED_POOL flag which is not part of the tag
604 //
605 Key &= ~PROTECTED_POOL;
606
607 //
608 // With WinDBG you can set a tag you want to break on when an allocation is
609 // attempted
610 //
611 if (Key == PoolHitTag) DbgBreakPoint();
612
613 //
614 // There is also an internal flag you can set to break on malformed tags
615 //
616 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
617
618 //
619 // ASSERT on ReactOS features not yet supported
620 //
621 ASSERT(!(PoolType & SESSION_POOL_MASK));
622 ASSERT(KeGetCurrentProcessorNumber() == 0);
623
624 //
625 // Why the double indirection? Because normally this function is also used
626 // when doing session pool allocations, which use another set of tables,
627 // sizes, and masks that live in session pool. We don't support session
628 // pool yet, so we only ever use the regular tables, but I'm keeping the code
629 // this way so that the day we DO support session pool, it won't require
630 // that many changes
631 //
632 Table = PoolTrackTable;
633 TableMask = PoolTrackTableMask;
634 TableSize = PoolTrackTableSize;
635
636 //
637 // Compute the hash for this key, and loop all the possible buckets
638 //
639 Hash = ExpComputeHashForTag(Key, TableMask);
640 Index = Hash;
641 while (TRUE)
642 {
643 //
644 // Do we already have an entry for this tag?
645 //
646 TableEntry = &Table[Hash];
647 if (TableEntry->Key == Key)
648 {
649 //
650 // Increment the counters depending on if this was paged or nonpaged
651 // pool
652 //
653 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
654 {
655 InterlockedIncrement(&TableEntry->NonPagedAllocs);
656 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
657 return;
658 }
659 InterlockedIncrement(&TableEntry->PagedAllocs);
660 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
661 return;
662 }
663
664 //
665 // We don't have an entry yet, but we've found a free bucket for it
666 //
667 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
668 {
669 //
670 // We need to hold the lock while creating a new entry, since other
671 // processors might be in this code path as well
672 //
673 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
674 if (!PoolTrackTable[Hash].Key)
675 {
676 //
677 // We've won the race, so now create this entry in the bucket
678 //
679 ASSERT(Table[Hash].Key == 0);
680 PoolTrackTable[Hash].Key = Key;
681 TableEntry->Key = Key;
682 }
683 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
684
685 //
686 // Now we force the loop to run again, and we should now end up in
687 // the code path above which does the interlocked increments...
688 //
689 continue;
690 }
691
692 //
693 // This path is hit when we don't have an entry, and the current bucket
694 // is full, so we simply try the next one
695 //
696 Hash = (Hash + 1) & TableMask;
697 if (Hash == Index) break;
698 }
699
700 //
701 // And finally this path is hit when all the buckets are full, and we need
702 // some expansion. This path is not yet supported in ReactOS and so we'll
703 // ignore the tag
704 //
705 DPRINT1("Out of pool tag space, ignoring...\n");
706 }
707
708 VOID
709 NTAPI
710 INIT_FUNCTION
711 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
712 IN POOL_TYPE PoolType,
713 IN ULONG PoolIndex,
714 IN ULONG Threshold,
715 IN PVOID PoolLock)
716 {
717 PLIST_ENTRY NextEntry, LastEntry;
718
719 //
720 // Setup the descriptor based on the caller's request
721 //
722 PoolDescriptor->PoolType = PoolType;
723 PoolDescriptor->PoolIndex = PoolIndex;
724 PoolDescriptor->Threshold = Threshold;
725 PoolDescriptor->LockAddress = PoolLock;
726
727 //
728 // Initialize accounting data
729 //
730 PoolDescriptor->RunningAllocs = 0;
731 PoolDescriptor->RunningDeAllocs = 0;
732 PoolDescriptor->TotalPages = 0;
733 PoolDescriptor->TotalBytes = 0;
734 PoolDescriptor->TotalBigPages = 0;
735
736 //
737 // Nothing pending for now
738 //
739 PoolDescriptor->PendingFrees = NULL;
740 PoolDescriptor->PendingFreeDepth = 0;
741
742 //
743 // Loop all the descriptor's allocation lists and initialize them
744 //
745 NextEntry = PoolDescriptor->ListHeads;
746 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
747 while (NextEntry < LastEntry)
748 {
749 ExpInitializePoolListHead(NextEntry);
750 NextEntry++;
751 }
752
753 //
754 // Note that ReactOS does not support Session Pool Yet
755 //
756 ASSERT(PoolType != PagedPoolSession);
757 }
758
759 VOID
760 NTAPI
761 INIT_FUNCTION
762 InitializePool(IN POOL_TYPE PoolType,
763 IN ULONG Threshold)
764 {
765 PPOOL_DESCRIPTOR Descriptor;
766 SIZE_T TableSize;
767 ULONG i;
768
769 //
770 // Check what kind of pool this is
771 //
772 if (PoolType == NonPagedPool)
773 {
774 //
775 // Compute the track table size and convert it from a power of two to an
776 // actual byte size
777 //
778 // NOTE: On checked builds, we'll assert if the registry table size was
779 // invalid, while on retail builds we'll just break out of the loop at
780 // that point.
781 //
782 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
783 for (i = 0; i < 32; i++)
784 {
785 if (TableSize & 1)
786 {
787 ASSERT((TableSize & ~1) == 0);
788 if (!(TableSize & ~1)) break;
789 }
790 TableSize >>= 1;
791 }
792
793 //
794 // If we hit bit 32, then no size was defined in the registry, so
795 // we'll use the default size of 2048 entries.
796 //
797 // Otherwise, use the size from the registry, as long as it's not
798 // smaller than 64 entries.
799 //
800 if (i == 32)
801 {
802 PoolTrackTableSize = 2048;
803 }
804 else
805 {
806 PoolTrackTableSize = max(1 << i, 64);
807 }
808
809 //
810 // Loop trying with the biggest specified size first, and cut it down
811 // by a power of two each iteration in case not enough memory exists
812 //
813 while (TRUE)
814 {
815 //
816 // Do not allow overflow
817 //
818 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
819 {
820 PoolTrackTableSize >>= 1;
821 continue;
822 }
823
824 //
825 // Allocate the tracker table and exit the loop if this worked
826 //
827 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
828 (PoolTrackTableSize + 1) *
829 sizeof(POOL_TRACKER_TABLE));
830 if (PoolTrackTable) break;
831
832 //
833 // Otherwise, as long as we're not down to the last bit, keep
834 // iterating
835 //
836 if (PoolTrackTableSize == 1)
837 {
838 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
839 TableSize,
840 0xFFFFFFFF,
841 0xFFFFFFFF,
842 0xFFFFFFFF);
843 }
844 PoolTrackTableSize >>= 1;
845 }
846
847 //
848 // Finally, add one entry, compute the hash mask, and zero the table
849 //
850 PoolTrackTableSize++;
851 PoolTrackTableMask = PoolTrackTableSize - 2;
852
853 RtlZeroMemory(PoolTrackTable,
854 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
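//
// To make the arithmetic above concrete (assuming nothing has overridden the
// globals earlier in boot): with no registry value, PoolTrackTableSize starts
// out as zero, the power-of-two scan runs all the way to i == 32, and the
// 2048-entry default is used. The allocation then reserves 2048 + 1 entries,
// the increment above brings PoolTrackTableSize to 2049, and the mask ends up
// as 2047.
//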
855
856 //
857 // We now do the exact same thing with the tracker table for big pages
858 //
859 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
860 for (i = 0; i < 32; i++)
861 {
862 if (TableSize & 1)
863 {
864 ASSERT((TableSize & ~1) == 0);
865 if (!(TableSize & ~1)) break;
866 }
867 TableSize >>= 1;
868 }
869
870 //
871 // For big pages, the default tracker table is 4096 entries, while the
872 // minimum is still 64
873 //
874 if (i == 32)
875 {
876 PoolBigPageTableSize = 4096;
877 }
878 else
879 {
880 PoolBigPageTableSize = max(1 << i, 64);
881 }
882
883 //
884 // Again, run the exact same loop we ran earlier, but this time for the
885 // big pool tracker instead
886 //
887 while (TRUE)
888 {
889 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
890 {
891 PoolBigPageTableSize >>= 1;
892 continue;
893 }
894
895 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
896 PoolBigPageTableSize *
897 sizeof(POOL_TRACKER_BIG_PAGES));
898 if (PoolBigPageTable) break;
899
900 if (PoolBigPageTableSize == 1)
901 {
902 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
903 TableSize,
904 0xFFFFFFFF,
905 0xFFFFFFFF,
906 0xFFFFFFFF);
907 }
908
909 PoolBigPageTableSize >>= 1;
910 }
911
912 //
913 // An extra entry is not needed for the big pool tracker, so just
914 // compute the hash and zero it
915 //
916 PoolBigPageTableHash = PoolBigPageTableSize - 1;
917 RtlZeroMemory(PoolBigPageTable,
918 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
919 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
920
921 //
922 // During development, print this out so we can see what's happening
923 //
924 DPRINT1("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
925 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
926 DPRINT1("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
927 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
928
929 //
930 // Insert the generic tracker for all of big pool
931 //
932 ExpInsertPoolTracker('looP',
933 ROUND_TO_PAGES(PoolBigPageTableSize *
934 sizeof(POOL_TRACKER_BIG_PAGES)),
935 NonPagedPool);
936
937 //
938 // No support for NUMA systems at this time
939 //
940 ASSERT(KeNumberNodes == 1);
941
942 //
943 // Initialize the tag spinlock
944 //
945 KeInitializeSpinLock(&ExpTaggedPoolLock);
946
947 //
948 // Initialize the nonpaged pool descriptor
949 //
950 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
951 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
952 NonPagedPool,
953 0,
954 Threshold,
955 NULL);
956 }
957 else
958 {
959 //
960 // No support for NUMA systems at this time
961 //
962 ASSERT(KeNumberNodes == 1);
963
964 //
965 // Allocate the pool descriptor
966 //
967 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
968 sizeof(KGUARDED_MUTEX) +
969 sizeof(POOL_DESCRIPTOR),
970 'looP');
971 if (!Descriptor)
972 {
973 //
974 // This is really bad...
975 //
976 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
977 0,
978 -1,
979 -1,
980 -1);
981 }
982
983 //
984 // Setup the vector and guarded mutex for paged pool
985 //
986 PoolVector[PagedPool] = Descriptor;
987 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
988 ExpPagedPoolDescriptor[0] = Descriptor;
989 KeInitializeGuardedMutex(ExpPagedPoolMutex);
990 ExInitializePoolDescriptor(Descriptor,
991 PagedPool,
992 0,
993 Threshold,
994 ExpPagedPoolMutex);
995
996 //
997 // Insert the generic tracker for all of nonpaged pool
998 //
999 ExpInsertPoolTracker('looP',
1000 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1001 NonPagedPool);
1002 }
1003 }
1004
1005 FORCEINLINE
1006 KIRQL
1007 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1008 {
1009 //
1010 // Check if this is nonpaged pool
1011 //
1012 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1013 {
1014 //
1015 // Use the queued spin lock
1016 //
1017 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1018 }
1019 else
1020 {
1021 //
1022 // Use the guarded mutex
1023 //
1024 KeAcquireGuardedMutex(Descriptor->LockAddress);
1025 return APC_LEVEL;
1026 }
1027 }
1028
1029 FORCEINLINE
1030 VOID
1031 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1032 IN KIRQL OldIrql)
1033 {
1034 //
1035 // Check if this is nonpaged pool
1036 //
1037 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1038 {
1039 //
1040 // Use the queued spin lock
1041 //
1042 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1043 }
1044 else
1045 {
1046 //
1047 // Use the guarded mutex
1048 //
1049 KeReleaseGuardedMutex(Descriptor->LockAddress);
1050 }
1051 }
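//
// A note on the locking contract sketched by the two helpers above: for
// nonpaged pool the queued spin lock raises to DISPATCH_LEVEL and hands back
// the previous IRQL, while for paged pool the guarded mutex does not touch
// the IRQL at all, so the APC_LEVEL return value is merely a placeholder that
// ExUnlockPool never looks at.
//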
1052
1053 VOID
1054 NTAPI
1055 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1056 IN PVOID DeferredContext,
1057 IN PVOID SystemArgument1,
1058 IN PVOID SystemArgument2)
1059 {
1060 PPOOL_DPC_CONTEXT Context = DeferredContext;
1061 UNREFERENCED_PARAMETER(Dpc);
1062 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1063
1064 //
1065 // Make sure we win the race, and if we did, copy the data atomically
1066 //
1067 if (KeSignalCallDpcSynchronize(SystemArgument2))
1068 {
1069 RtlCopyMemory(Context->PoolTrackTable,
1070 PoolTrackTable,
1071 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1072
1073 //
1074 // This is here because ReactOS does not yet support expansion
1075 //
1076 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1077 }
1078
1079 //
1080 // Regardless of whether we won or not, we must now synchronize and then
1081 // decrement the barrier since this is one more processor that has completed
1082 // the callback.
1083 //
1084 KeSignalCallDpcSynchronize(SystemArgument2);
1085 KeSignalCallDpcDone(SystemArgument1);
1086 }
1087
1088 NTSTATUS
1089 NTAPI
1090 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1091 IN ULONG SystemInformationLength,
1092 IN OUT PULONG ReturnLength OPTIONAL)
1093 {
1094 ULONG TableSize, CurrentLength;
1095 ULONG EntryCount;
1096 NTSTATUS Status = STATUS_SUCCESS;
1097 PSYSTEM_POOLTAG TagEntry;
1098 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1099 POOL_DPC_CONTEXT Context;
1100 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1101
1102 //
1103 // Keep track of how much data the caller's buffer must hold
1104 //
1105 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1106
1107 //
1108 // Initialize the caller's buffer
1109 //
1110 TagEntry = &SystemInformation->TagInfo[0];
1111 SystemInformation->Count = 0;
1112
1113 //
1114 // Capture the number of entries, and the total size needed to make a copy
1115 // of the table
1116 //
1117 EntryCount = (ULONG)PoolTrackTableSize;
1118 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1119
1120 //
1121 // Allocate the "Generic DPC" temporary buffer
1122 //
1123 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1124 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1125
1126 //
1127 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1128 //
1129 Context.PoolTrackTable = Buffer;
1130 Context.PoolTrackTableSize = PoolTrackTableSize;
1131 Context.PoolTrackTableExpansion = NULL;
1132 Context.PoolTrackTableSizeExpansion = 0;
1133 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1134
1135 //
1136 // Now parse the results
1137 //
1138 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1139 {
1140 //
1141 // If the entry is empty, skip it
1142 //
1143 if (!TrackerEntry->Key) continue;
1144
1145 //
1146 // Otherwise, add one more entry to the caller's buffer, and ensure that
1147 // enough space has been allocated in it
1148 //
1149 SystemInformation->Count++;
1150 CurrentLength += sizeof(*TagEntry);
1151 if (SystemInformationLength < CurrentLength)
1152 {
1153 //
1154 // The caller's buffer is too small, so set a failure code. The
1155 // caller will know the count, as well as how much space is needed.
1156 //
1157 // We do NOT break out of the loop, because we want to keep incrementing
1158 // the Count as well as CurrentLength so that the caller can know the
1159 // final numbers
1160 //
1161 Status = STATUS_INFO_LENGTH_MISMATCH;
1162 }
1163 else
1164 {
1165 //
1166 // Small sanity check that our accounting is working correctly
1167 //
1168 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1169 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1170
1171 //
1172 // Return the data into the caller's buffer
1173 //
1174 TagEntry->TagUlong = TrackerEntry->Key;
1175 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1176 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1177 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1178 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1179 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1180 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1181 TagEntry++;
1182 }
1183 }
1184
1185 //
1186 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1187 //
1188 ExFreePoolWithTag(Buffer, 'ofnI');
1189 if (ReturnLength) *ReturnLength = CurrentLength;
1190 return Status;
1191 }
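//
// Usage sketch (an assumption about the typical caller, not code from this
// file): a poolmon-style tool reaches this routine through
// NtQuerySystemInformation(SystemPoolTagInformation) and grows its buffer
// until the STATUS_INFO_LENGTH_MISMATCH path above no longer fires:
//
//     ULONG Length = 0;
//     NTSTATUS Status;
//     do
//     {
//         /* (re)allocate Length bytes for the SYSTEM_POOLTAG_INFORMATION */
//         Status = NtQuerySystemInformation(SystemPoolTagInformation,
//                                           Buffer, Length, &Length);
//     } while (Status == STATUS_INFO_LENGTH_MISMATCH);
//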
1192
1193 BOOLEAN
1194 NTAPI
1195 ExpAddTagForBigPages(IN PVOID Va,
1196 IN ULONG Key,
1197 IN ULONG NumberOfPages,
1198 IN POOL_TYPE PoolType)
1199 {
1200 ULONG Hash, i = 0;
1201 PVOID OldVa;
1202 KIRQL OldIrql;
1203 SIZE_T TableSize;
1204 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1205 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1206 ASSERT(!(PoolType & SESSION_POOL_MASK));
1207
1208 //
1209 // As the table is expandable, these values must only be read after acquiring
1210 // the lock to avoid a torn access during an expansion
1211 //
1212 Hash = ExpComputePartialHashForAddress(Va);
1213 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1214 Hash &= PoolBigPageTableHash;
1215 TableSize = PoolBigPageTableSize;
1216
1217 //
1218 // We loop from the current hash bucket to the end of the table, and then
1219 // roll over to hash bucket 0 and keep going from there. If we get back
1220 // to the beginning, then we attempt expansion at the bottom of the loop
1221 //
1222 EntryStart = Entry = &PoolBigPageTable[Hash];
1223 EntryEnd = &PoolBigPageTable[TableSize];
1224 do
1225 {
1226 //
1227 // Make sure that this is a free entry and attempt to atomically make the
1228 // entry busy now
1229 //
1230 OldVa = Entry->Va;
1231 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1232 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1233 {
1234 //
1235 // We now own this entry, write down the size and the pool tag
1236 //
1237 Entry->Key = Key;
1238 Entry->NumberOfPages = NumberOfPages;
1239
1240 //
1241 // Add one more entry to the count, and see if we're getting within
1242 // 25% of the table size, at which point we'll do an expansion now
1243 // to avoid blocking too hard later on.
1244 //
1245 // Note that we only do this once we have already lost the race (or found
1246 // the entry taken) at least 16 times, which implies a massive number of
1247 // concurrent big pool allocations.
1248 //
1249 InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1250 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1251 {
1252 DPRINT1("Should attempt expansion since we now have %lu entries\n",
1253 ExpPoolBigEntriesInUse);
1254 }
1255
1256 //
1257 // We have our entry, return
1258 //
1259 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1260 return TRUE;
1261 }
1262
1263 //
1264 // We don't have our entry yet, so keep trying, making the entry list
1265 // circular if we reach the last entry. We'll eventually break out of
1266 // the loop once we've rolled over and returned back to our original
1267 // hash bucket
1268 //
1269 i++;
1270 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1271 } while (Entry != EntryStart);
1272
1273 //
1274 // This means there's no free hash buckets whatsoever, so we would now have
1275 // to attempt expanding the table
1276 //
1277 DPRINT1("Big pool expansion needed, not implemented!\n");
1278 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1279 return FALSE;
1280 }
1281
1282 ULONG
1283 NTAPI
1284 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1285 OUT PULONG_PTR BigPages,
1286 IN POOL_TYPE PoolType)
1287 {
1288 BOOLEAN FirstTry = TRUE;
1289 SIZE_T TableSize;
1290 KIRQL OldIrql;
1291 ULONG PoolTag, Hash;
1292 PPOOL_TRACKER_BIG_PAGES Entry;
1293 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1294 ASSERT(!(PoolType & SESSION_POOL_MASK));
1295
1296 //
1297 // As the table is expandable, these values must only be read after acquiring
1298 // the lock to avoid a torn access during an expansion
1299 //
1300 Hash = ExpComputePartialHashForAddress(Va);
1301 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1302 Hash &= PoolBigPageTableHash;
1303 TableSize = PoolBigPageTableSize;
1304
1305 //
1306 // Loop while trying to find this big page allocation
1307 //
1308 while (PoolBigPageTable[Hash].Va != Va)
1309 {
1310 //
1311 // Increment the size until we go past the end of the table
1312 //
1313 if (++Hash >= TableSize)
1314 {
1315 //
1316 // Is this the second time we've tried?
1317 //
1318 if (!FirstTry)
1319 {
1320 //
1321 // This means it was never inserted into the pool table and it
1322 // received the special "BIG" tag -- return that tag with a page count
1323 // of 0 so that the code can ask Mm for the page count instead
1324 //
1325 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1326 *BigPages = 0;
1327 return ' GIB';
1328 }
1329
1330 //
1331 // The first time this happens, reset the hash index and try again
1332 //
1333 Hash = 0;
1334 FirstTry = FALSE;
1335 }
1336 }
1337
1338 //
1339 // Now capture all the information we need from the entry, since after we
1340 // release the lock, the data can change
1341 //
1342 Entry = &PoolBigPageTable[Hash];
1343 *BigPages = Entry->NumberOfPages;
1344 PoolTag = Entry->Key;
1345
1346 //
1347 // Set the free bit, and decrement the number of allocations. Finally, release
1348 // the lock and return the tag that was located
1349 //
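// (Va is page-aligned, so its low bit is known to be clear; the increment
//  below is just a cheap way of setting POOL_BIG_TABLE_ENTRY_FREE in place.)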
1350 InterlockedIncrement((PLONG)&Entry->Va);
1351 InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1352 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1353 return PoolTag;
1354 }
1355
1356 VOID
1357 NTAPI
1358 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1359 OUT PULONG NonPagedPoolPages,
1360 OUT PULONG PagedPoolAllocs,
1361 OUT PULONG PagedPoolFrees,
1362 OUT PULONG PagedPoolLookasideHits,
1363 OUT PULONG NonPagedPoolAllocs,
1364 OUT PULONG NonPagedPoolFrees,
1365 OUT PULONG NonPagedPoolLookasideHits)
1366 {
1367 ULONG i;
1368 PPOOL_DESCRIPTOR PoolDesc;
1369
1370 //
1371 // Assume all failures
1372 //
1373 *PagedPoolPages = 0;
1374 *PagedPoolAllocs = 0;
1375 *PagedPoolFrees = 0;
1376
1377 //
1378 // Tally up the totals for all the paged pools
1379 //
1380 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1381 {
1382 PoolDesc = ExpPagedPoolDescriptor[i];
1383 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1384 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1385 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1386 }
1387
1388 //
1389 // The first non-paged pool has a hardcoded well-known descriptor name
1390 //
1391 PoolDesc = &NonPagedPoolDescriptor;
1392 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1393 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1394 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1395
1396 //
1397 // If the system has more than one non-paged pool, copy the other descriptor
1398 // totals as well
1399 //
1400 #if 0
1401 if (ExpNumberOfNonPagedPools > 1)
1402 {
1403 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1404 {
1405 PoolDesc = ExpNonPagedPoolDescriptor[i];
1406 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1407 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1408 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1409 }
1410 }
1411 #endif
1412
1413 //
1414 // FIXME: Not yet supported
1415 //
1416 *NonPagedPoolLookasideHits += 0;
1417 *PagedPoolLookasideHits += 0;
1418 }
1419
1420 /* PUBLIC FUNCTIONS ***********************************************************/
1421
1422 /*
1423 * @implemented
1424 */
1425 PVOID
1426 NTAPI
1427 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1428 IN SIZE_T NumberOfBytes,
1429 IN ULONG Tag)
1430 {
1431 PPOOL_DESCRIPTOR PoolDesc;
1432 PLIST_ENTRY ListHead;
1433 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1434 KIRQL OldIrql;
1435 USHORT BlockSize, i;
1436 ULONG OriginalType;
1437 PKPRCB Prcb = KeGetCurrentPrcb();
1438 PGENERAL_LOOKASIDE LookasideList;
1439
1440 //
1441 // Some sanity checks
1442 //
1443 ASSERT(Tag != 0);
1444 ASSERT(Tag != ' GIB');
1445 ASSERT(NumberOfBytes != 0);
1446 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1447
1448 //
1449 // Not supported in ReactOS
1450 //
1451 ASSERT(!(PoolType & SESSION_POOL_MASK));
1452
1453 //
1454 // Check if verifier or special pool is enabled
1455 //
1456 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1457 {
1458 //
1459 // For verifier, we should call the verification routine
1460 //
1461 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1462 {
1463 DPRINT1("Driver Verifier is not yet supported\n");
1464 }
1465
1466 //
1467 // For special pool, we check if this is a suitable allocation and do
1468 // the special allocation if needed
1469 //
1470 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1471 {
1472 //
1473 // Check if this is a special pool allocation
1474 //
1475 if (MmUseSpecialPool(NumberOfBytes, Tag))
1476 {
1477 //
1478 // Try to allocate using special pool
1479 //
1480 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1481 if (Entry) return Entry;
1482 }
1483 }
1484 }
1485
1486 //
1487 // Get the pool type and its corresponding vector for this request
1488 //
1489 OriginalType = PoolType;
1490 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1491 PoolDesc = PoolVector[PoolType];
1492 ASSERT(PoolDesc != NULL);
1493
1494 //
1495 // Check if this is a big page allocation
1496 //
1497 if (NumberOfBytes > POOL_MAX_ALLOC)
1498 {
1499 //
1500 // Allocate pages for it
1501 //
1502 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1503 if (!Entry)
1504 {
1505 //
1506 // Must succeed pool is deprecated, but still supported. These allocation
1507 // failures must cause an immediate bugcheck
1508 //
1509 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1510 {
1511 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1512 NumberOfBytes,
1513 NonPagedPoolDescriptor.TotalPages,
1514 NonPagedPoolDescriptor.TotalBigPages,
1515 0);
1516 }
1517
1518 //
1519 // Internal debugging
1520 //
1521 ExPoolFailures++;
1522
1523 //
1524 // This flag requests printing failures, and can also further specify
1525 // breaking on failures
1526 //
1527 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1528 {
1529 DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
1530 NumberOfBytes,
1531 OriginalType);
1532 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1533 }
1534
1535 //
1536 // Finally, this flag requests an exception, which we are more than
1537 // happy to raise!
1538 //
1539 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1540 {
1541 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1542 }
1543 }
1544
1545 //
1546 // Increment required counters
1547 //
1548 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1549 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1550 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1551 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1552
1553 //
1554 // Add a tag for the big page allocation and switch to the generic "BIG"
1555 // tag if we failed to do so, then insert a tracker for this allocation.
1556 //
1557 if (!ExpAddTagForBigPages(Entry,
1558 Tag,
1559 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1560 OriginalType))
1561 {
1562 Tag = ' GIB';
1563 }
1564 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1565 return Entry;
1566 }
1567
1568 //
1569 // Should never request 0 bytes from the pool, but since so many drivers do
1570 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1571 //
1572 if (!NumberOfBytes) NumberOfBytes = 1;
1573
1574 //
1575 // A pool allocation is defined by its data, a linked list to connect it to
1576 // the free list (if necessary), and a pool header to store accounting info.
1577 // Calculate this size, then convert it into a block size (units of pool
1578 // headers)
1579 //
1580 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1581 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1582 // the direct allocation of pages.
1583 //
1584 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1585 / POOL_BLOCK_SIZE);
1586 ASSERT(i < POOL_LISTS_PER_PAGE);
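//
// Worked example, assuming the usual 32-bit sizes (POOL_BLOCK_SIZE and
// sizeof(POOL_HEADER) both 8): a 24-byte request gives
// i = (24 + 8 + 7) / 8 = 4 blocks, i.e. one header plus the payload rounded
// up to the 8-byte block granularity.
//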
1587
1588 //
1589 // Handle lookaside list optimization for both paged and nonpaged pool
1590 //
1591 if (i <= MAXIMUM_PROCESSORS)
1592 {
1593 //
1594 // Try popping it from the per-CPU lookaside list
1595 //
1596 LookasideList = (PoolType == PagedPool) ?
1597 Prcb->PPPagedLookasideList[i - 1].P :
1598 Prcb->PPNPagedLookasideList[i - 1].P;
1599 LookasideList->TotalAllocates++;
1600 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1601 if (!Entry)
1602 {
1603 //
1604 // We failed, try popping it from the global list
1605 //
1606 LookasideList = (PoolType == PagedPool) ?
1607 Prcb->PPPagedLookasideList[i - 1].L :
1608 Prcb->PPNPagedLookasideList[i - 1].L;
1609 LookasideList->TotalAllocates++;
1610 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1611 }
1612
1613 //
1614 // If we were able to pop it, update the accounting and return the block
1615 //
1616 if (Entry)
1617 {
1618 LookasideList->AllocateHits++;
1619
1620 //
1621 // Get the real entry, write down its pool type, and track it
1622 //
1623 Entry--;
1624 Entry->PoolType = PoolType + 1;
1625 ExpInsertPoolTracker(Tag,
1626 Entry->BlockSize * POOL_BLOCK_SIZE,
1627 OriginalType);
1628
1629 //
1630 // Return the pool allocation
1631 //
1632 Entry->PoolTag = Tag;
1633 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1634 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1635 return POOL_FREE_BLOCK(Entry);
1636 }
1637 }
1638
1639 //
1640 // Loop the free lists looking for a block of this size. Start with the
1641 // list optimized for this kind of size lookup
1642 //
1643 ListHead = &PoolDesc->ListHeads[i];
1644 do
1645 {
1646 //
1647 // Are there any free entries available on this list?
1648 //
1649 if (!ExpIsPoolListEmpty(ListHead))
1650 {
1651 //
1652 // Acquire the pool lock now
1653 //
1654 OldIrql = ExLockPool(PoolDesc);
1655
1656 //
1657 // And make sure the list still has entries
1658 //
1659 if (ExpIsPoolListEmpty(ListHead))
1660 {
1661 //
1662 // Someone raced us (and won) before we had a chance to acquire
1663 // the lock.
1664 //
1665 // Try again!
1666 //
1667 ExUnlockPool(PoolDesc, OldIrql);
1668 continue;
1669 }
1670
1671 //
1672 // Remove a free entry from the list
1673 // Note that due to the way we insert free blocks into multiple lists
1674 // there is a guarantee that any block on this list will either be
1675 // of the correct size, or perhaps larger.
1676 //
1677 ExpCheckPoolLinks(ListHead);
1678 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1679 ExpCheckPoolLinks(ListHead);
1680 ExpCheckPoolBlocks(Entry);
1681 ASSERT(Entry->BlockSize >= i);
1682 ASSERT(Entry->PoolType == 0);
1683
1684 //
1685 // Check if this block is larger than what we need. The block could
1686 // not possibly be smaller, due to the reason explained above (and
1687 // we would've asserted on a checked build if this was the case).
1688 //
1689 if (Entry->BlockSize != i)
1690 {
1691 //
1692 // Is there an entry before this one?
1693 //
1694 if (Entry->PreviousSize == 0)
1695 {
1696 //
1697 // There isn't anyone before us, so take the next block and
1698 // turn it into a fragment that contains the leftover data
1699 // that we don't need to satisfy the caller's request
1700 //
1701 FragmentEntry = POOL_BLOCK(Entry, i);
1702 FragmentEntry->BlockSize = Entry->BlockSize - i;
1703
1704 //
1705 // And make it point back to us
1706 //
1707 FragmentEntry->PreviousSize = i;
1708
1709 //
1710 // Now get the block that follows the new fragment and check
1711 // if it's still on the same page as us (and not at the end)
1712 //
1713 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
1714 if (PAGE_ALIGN(NextEntry) != NextEntry)
1715 {
1716 //
1717 // Adjust this next block to point to our newly created
1718 // fragment block
1719 //
1720 NextEntry->PreviousSize = FragmentEntry->BlockSize;
1721 }
1722 }
1723 else
1724 {
1725 //
1726 // There is a free entry before us, which we know is smaller
1727 // so we'll make this entry the fragment instead
1728 //
1729 FragmentEntry = Entry;
1730
1731 //
1732 // And then we'll remove from it the actual size required.
1733 // Now the entry is a leftover free fragment
1734 //
1735 Entry->BlockSize -= i;
1736
1737 //
1738 // Now let's go to the next entry after the fragment (which
1739 // used to point to our original free entry) and make it
1740 // reference the new fragment entry instead.
1741 //
1742 // This is the entry that will actually end up holding the
1743 // allocation!
1744 //
1745 Entry = POOL_NEXT_BLOCK(Entry);
1746 Entry->PreviousSize = FragmentEntry->BlockSize;
1747
1748 //
1749 // And now let's go to the entry after that one and check if
1750 // it's still on the same page, and not at the end
1751 //
1752 NextEntry = POOL_BLOCK(Entry, i);
1753 if (PAGE_ALIGN(NextEntry) != NextEntry)
1754 {
1755 //
1756 // Make it reference the allocation entry
1757 //
1758 NextEntry->PreviousSize = i;
1759 }
1760 }
1761
1762 //
1763 // Now our (allocation) entry is the right size
1764 //
1765 Entry->BlockSize = i;
1766
1767 //
1768 // And the next entry is now the free fragment which contains
1769 // the remaining difference between how big the original entry
1770 // was, and the actual size the caller needs/requested.
1771 //
1772 FragmentEntry->PoolType = 0;
1773 BlockSize = FragmentEntry->BlockSize;
1774
1775 //
1776 // Now check if enough free bytes remained for us to have a
1777 // "full" entry, which contains enough bytes for a linked list
1778 // and thus can be used for allocations (up to 8 bytes...)
1779 //
1780 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
1781 if (BlockSize != 1)
1782 {
1783 //
1784 // Insert the free entry into the free list for this size
1785 //
1786 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
1787 POOL_FREE_BLOCK(FragmentEntry));
1788 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
1789 }
1790 }
1791
1792 //
1793 // We have found an entry for this allocation, so set the pool type
1794 // and release the lock since we're done
1795 //
1796 Entry->PoolType = PoolType + 1;
1797 ExpCheckPoolBlocks(Entry);
1798 ExUnlockPool(PoolDesc, OldIrql);
1799
1800 //
1801 // Increment required counters
1802 //
1803 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1804 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1805
1806 //
1807 // Track this allocation
1808 //
1809 ExpInsertPoolTracker(Tag,
1810 Entry->BlockSize * POOL_BLOCK_SIZE,
1811 OriginalType);
1812
1813 //
1814 // Return the pool allocation
1815 //
1816 Entry->PoolTag = Tag;
1817 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1818 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1819 return POOL_FREE_BLOCK(Entry);
1820 }
1821 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
1822
1823 //
1824 // There were no free entries left, so we have to allocate a new fresh page
1825 //
1826 Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
1827 if (!Entry)
1828 {
1829 //
1830 // Must succeed pool is deprecated, but still supported. These allocation
1831 // failures must cause an immediate bugcheck
1832 //
1833 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1834 {
1835 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1836 PAGE_SIZE,
1837 NonPagedPoolDescriptor.TotalPages,
1838 NonPagedPoolDescriptor.TotalBigPages,
1839 0);
1840 }
1841
1842 //
1843 // Internal debugging
1844 //
1845 ExPoolFailures++;
1846
1847 //
1848 // This flag requests printing failures, and can also further specify
1849 // breaking on failures
1850 //
1851 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1852 {
1853 DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
1854 NumberOfBytes,
1855 OriginalType);
1856 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1857 }
1858
1859 //
1860 // Finally, this flag requests an exception, which we are more than
1861 // happy to raise!
1862 //
1863 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1864 {
1865 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1866 }
1867
1868 //
1869 // Return NULL to the caller in all other cases
1870 //
1871 return NULL;
1872 }
1873
1874 //
1875 // Setup the entry data
1876 //
1877 Entry->Ulong1 = 0;
1878 Entry->BlockSize = i;
1879 Entry->PoolType = PoolType + 1;
1880
1881 //
1882 // This page will have two entries -- one for the allocation (which we just
1883 // created above), and one for the remaining free bytes, which we're about
1884 // to create now. The free bytes are the whole page minus what was allocated
1885 // and then converted into units of block headers.
1886 //
1887 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
1888 FragmentEntry = POOL_BLOCK(Entry, i);
1889 FragmentEntry->Ulong1 = 0;
1890 FragmentEntry->BlockSize = BlockSize;
1891 FragmentEntry->PreviousSize = i;
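//
// Worked example, again assuming 8-byte blocks on a 4096-byte page: the page
// holds PAGE_SIZE / POOL_BLOCK_SIZE == 512 blocks, so after carving out a
// 4-block allocation the fragment covers the remaining 508 blocks and will be
// queued on ListHeads[507] below.
//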
1892
1893 //
1894 // Increment required counters
1895 //
1896 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
1897 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1898
1899 //
1900 // Now check if enough free bytes remained for us to have a "full" entry,
1901 // which contains enough bytes for a linked list and thus can be used for
1902 // allocations (up to 8 bytes...)
1903 //
1904 if (FragmentEntry->BlockSize != 1)
1905 {
1906 //
1907 // Excellent -- acquire the pool lock
1908 //
1909 OldIrql = ExLockPool(PoolDesc);
1910
1911 //
1912 // And insert the free entry into the free list for this block size
1913 //
1914 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
1915 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
1916 POOL_FREE_BLOCK(FragmentEntry));
1917 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
1918
1919 //
1920 // Release the pool lock
1921 //
1922 ExpCheckPoolBlocks(Entry);
1923 ExUnlockPool(PoolDesc, OldIrql);
1924 }
1925 else
1926 {
1927 //
1928 // Simply do a sanity check
1929 //
1930 ExpCheckPoolBlocks(Entry);
1931 }
1932
1933 //
1934 // Increment performance counters and track this allocation
1935 //
1936 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1937 ExpInsertPoolTracker(Tag,
1938 Entry->BlockSize * POOL_BLOCK_SIZE,
1939 PoolType);
1940
1941 //
1942 // And return the pool allocation
1943 //
1944 ExpCheckPoolBlocks(Entry);
1945 Entry->PoolTag = Tag;
1946 return POOL_FREE_BLOCK(Entry);
1947 }
1948
1949 /*
1950 * @implemented
1951 */
1952 PVOID
1953 NTAPI
1954 ExAllocatePool(POOL_TYPE PoolType,
1955 SIZE_T NumberOfBytes)
1956 {
1957 //
1958 // Use a default tag of "None"
1959 //
1960 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, TAG_NONE);
1961 }
1962
1963 /*
1964 * @implemented
1965 */
1966 VOID
1967 NTAPI
1968 ExFreePoolWithTag(IN PVOID P,
1969 IN ULONG TagToFree)
1970 {
1971 PPOOL_HEADER Entry, NextEntry;
1972 USHORT BlockSize;
1973 KIRQL OldIrql;
1974 POOL_TYPE PoolType;
1975 PPOOL_DESCRIPTOR PoolDesc;
1976 ULONG Tag;
1977 BOOLEAN Combined = FALSE;
1978 PFN_NUMBER PageCount, RealPageCount;
1979 PKPRCB Prcb = KeGetCurrentPrcb();
1980 PGENERAL_LOOKASIDE LookasideList;
1981
1982 //
1983 // Check if any of the debug flags are enabled
1984 //
1985 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
1986 POOL_FLAG_CHECK_WORKERS |
1987 POOL_FLAG_CHECK_RESOURCES |
1988 POOL_FLAG_VERIFIER |
1989 POOL_FLAG_CHECK_DEADLOCK |
1990 POOL_FLAG_SPECIAL_POOL))
1991 {
1992 //
1993 // Check if special pool is enabled
1994 //
1995 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1996 {
1997 //
1998 // Check if it was allocated from a special pool
1999 //
2000 if (MmIsSpecialPoolAddress(P))
2001 {
2002 //
2003 // Was deadlock verification also enabled? We can do some extra
2004 // checks at this point
2005 //
2006 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2007 {
2008 DPRINT1("Verifier not yet supported\n");
2009 }
2010
2011 //
2012 // It is, so handle it via special pool free routine
2013 //
2014 MmFreeSpecialPool(P);
2015 return;
2016 }
2017 }
2018
2019 //
2020 // For non-big page allocations, we'll do a bunch of checks in here
2021 //
2022 if (PAGE_ALIGN(P) != P)
2023 {
2024 //
2025 // Get the entry for this pool allocation
2026 // The pointer math here may look wrong or confusing, but it is quite right
2027 //
2028 Entry = P;
2029 Entry--;
2030
2031 //
2032 // Get the pool type
2033 //
2034 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2035
2036 //
2037 // FIXME: Many other debugging checks go here
2038 //
2039 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2040 }
2041 }
2042
2043 //
2044 // Check if this is a big page allocation
2045 //
2046 if (PAGE_ALIGN(P) == P)
2047 {
2048 //
2049 // We need to find the tag for it, so first we need to find out what
2050 // kind of allocation this was (paged or nonpaged), then we can go
2051 // ahead and try finding the tag for it. Remember to get rid of the
2052 // PROTECTED_POOL tag if it's found.
2053 //
2054 // Note that if at insertion time, we failed to add the tag for a big
2055 // pool allocation, we used a special tag called 'BIG' to identify the
2056 // allocation, and we may get this tag back. In this scenario, we must
2057 // manually get the size of the allocation by actually counting through
2058 // the PFN database.
2059 //
2060 PoolType = MmDeterminePoolType(P);
2061 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2062 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2063 if (!Tag)
2064 {
2065 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2066 ASSERT(Tag == ' GIB');
2067 PageCount = 1; // We are going to lie! This might screw up accounting?
2068 }
2069 else if (Tag & PROTECTED_POOL)
2070 {
2071 Tag &= ~PROTECTED_POOL;
2072 }
2073
2074 //
2075 // We have our tag and our page count, so we can go ahead and remove this
2076 // tracker now
2077 //
2078 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
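        //
        // Note that big-pool tracking is done in bytes, hence the shift above:
        // on a system with 4KB pages (PAGE_SHIFT == 12), a 3-page allocation is
        // accounted as 3 << 12 == 12288 bytes.
        //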
2079
2080 //
2081 // Check if any of the debug flags are enabled
2082 //
2083 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2084 POOL_FLAG_CHECK_WORKERS |
2085 POOL_FLAG_CHECK_RESOURCES |
2086 POOL_FLAG_CHECK_DEADLOCK))
2087 {
2088 //
2089 // Was deadlock verification also enabled? We can do some extra
2090 // checks at this point
2091 //
2092 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2093 {
2094 DPRINT1("Verifier not yet supported\n");
2095 }
2096
2097 //
2098 // FIXME: Many debugging checks go here
2099 //
2100 }
2101
2102 //
2103 // Update counters
2104 //
2105 PoolDesc = PoolVector[PoolType];
2106 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2107 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2108 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2109
2110 //
2111 // Do the real free now and update the last counter with the big page count
2112 //
2113 RealPageCount = MiFreePoolPages(P);
2114 ASSERT(RealPageCount == PageCount);
2115 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2116 -(LONG)RealPageCount);
2117 return;
2118 }
2119
2120 //
2121 // Get the entry for this pool allocation
2122 // The pointer math here may look wrong or confusing, but it is quite right
2123 //
2124 Entry = P;
2125 Entry--;
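    //
    // Because Entry is typed as PPOOL_HEADER, the decrement above steps back by
    // exactly sizeof(POOL_HEADER) bytes (one POOL_BLOCK_SIZE unit), which is
    // where the header of every small pool allocation lives.
    //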
2126
2127 //
2128     // Get the size of the entry, and its pool type, then load the descriptor
2129 // for this pool type
2130 //
2131 BlockSize = Entry->BlockSize;
2132 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2133 PoolDesc = PoolVector[PoolType];
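    //
    // PoolVector only has two entries -- index 0 for nonpaged pool and index 1
    // for paged pool -- which is all that BASE_POOL_TYPE_MASK can produce.
    //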
2134
2135 //
2136 // Make sure that the IRQL makes sense
2137 //
2138 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2139
2140 //
2141 // Get the pool tag and get rid of the PROTECTED_POOL flag
2142 //
2143 Tag = Entry->PoolTag;
2144 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2145
2146 //
2147 // Stop tracking this allocation
2148 //
2149 ExpRemovePoolTracker(Tag,
2150 BlockSize * POOL_BLOCK_SIZE,
2151 Entry->PoolType - 1);
2152
2153 //
2154 // Check block tag
2155 //
2156 if (TagToFree && TagToFree != Tag)
2157 {
2158 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2159 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2160 }
2161
2162 //
2163 // Is this allocation small enough to have come from a lookaside list?
2164 //
2165 if (BlockSize <= MAXIMUM_PROCESSORS)
2166 {
2167 //
2168 // Try pushing it into the per-CPU lookaside list
2169 //
2170 LookasideList = (PoolType == PagedPool) ?
2171 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2172 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2173 LookasideList->TotalFrees++;
2174 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2175 {
2176 LookasideList->FreeHits++;
2177 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2178 return;
2179 }
2180
2181 //
2182 // We failed, try to push it into the global lookaside list
2183 //
2184 LookasideList = (PoolType == PagedPool) ?
2185 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2186 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2187 LookasideList->TotalFrees++;
2188 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2189 {
2190 LookasideList->FreeHits++;
2191 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2192 return;
2193 }
2194 }
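    //
    // Both lookaside lists were at capacity (or the block was too big for them
    // in the first place), so fall through and return the block to the pool
    // descriptor's free lists.
    //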
2195
2196 //
2197 // Get the pointer to the next entry
2198 //
2199 NextEntry = POOL_BLOCK(Entry, BlockSize);
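    //
    // BlockSize is counted in POOL_BLOCK_SIZE units, so this computes the
    // address right past this allocation: either the header of the next block
    // in the page, or the start of the next page (the page-alignment check
    // below tells the two apart).
    //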
2200
2201 //
2202 // Update performance counters
2203 //
2204 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2205     InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -(LONG_PTR)(BlockSize * POOL_BLOCK_SIZE));
2206
2207 //
2208 // Acquire the pool lock
2209 //
2210 OldIrql = ExLockPool(PoolDesc);
2211
2212 //
2213 // Check if the next allocation is at the end of the page
2214 //
2215 ExpCheckPoolBlocks(Entry);
2216 if (PAGE_ALIGN(NextEntry) != NextEntry)
2217 {
2218 //
2219 // We may be able to combine the block if it's free
2220 //
2221 if (NextEntry->PoolType == 0)
2222 {
2223 //
2224 // The next block is free, so we'll do a combine
2225 //
2226 Combined = TRUE;
2227
2228 //
2229 // Make sure there's actual data in the block -- anything smaller
2230 // than this means we only have the header, so there's no linked list
2231 // for us to remove
2232 //
2233             if (NextEntry->BlockSize != 1)
2234 {
2235 //
2236 // The block is at least big enough to have a linked list, so go
2237 // ahead and remove it
2238 //
2239 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2240 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2241 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2242 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2243 }
2244
2245 //
2246 // Our entry is now combined with the next entry
2247 //
2248 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2249 }
2250 }
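    //
    // Note that coalescing is simple size arithmetic: if our block was, say, 4
    // units and the free neighbor 6 units, the merged free block now spans 10
    // POOL_BLOCK_SIZE units under a single header.
    //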
2251
2252 //
2253 // Now check if there was a previous entry on the same page as us
2254 //
2255 if (Entry->PreviousSize)
2256 {
2257 //
2258         // Great, grab that block (reusing NextEntry to hold it) and check if it's free
2259 //
2260 NextEntry = POOL_PREV_BLOCK(Entry);
2261 if (NextEntry->PoolType == 0)
2262 {
2263 //
2264 // It is, so we can do a combine
2265 //
2266 Combined = TRUE;
2267
2268 //
2269 // Make sure there's actual data in the block -- anything smaller
2270             // than this means we only have the header, so there's no linked list
2271             // for us to remove
2272             //
2273             if (NextEntry->BlockSize != 1)
2274 {
2275 //
2276 // The block is at least big enough to have a linked list, so go
2277 // ahead and remove it
2278 //
2279 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2280 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2281 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2282 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2283 }
2284
2285 //
2286 // Combine our original block (which might've already been combined
2287 // with the next block), into the previous block
2288 //
2289 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2290
2291 //
2292 // And now we'll work with the previous block instead
2293 //
2294 Entry = NextEntry;
2295 }
2296 }
2297
2298 //
2299 // By now, it may have been possible for our combined blocks to actually
2300 // have made up a full page (if there were only 2-3 allocations on the
2301 // page, they could've all been combined).
2302 //
2303 if ((PAGE_ALIGN(Entry) == Entry) &&
2304 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2305 {
2306 //
2307 // In this case, release the pool lock, update the performance counter,
2308 // and free the page
2309 //
2310 ExUnlockPool(PoolDesc, OldIrql);
2311 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2312 MiFreePoolPages(Entry);
2313 return;
2314 }
2315
2316 //
2317 // Otherwise, we now have a free block (or a combination of 2 or 3)
2318 //
2319 Entry->PoolType = 0;
2320 BlockSize = Entry->BlockSize;
2321 ASSERT(BlockSize != 1);
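    //
    // A freed block always spans at least two units -- one for the header and
    // at least one for the caller's data -- so there is always room for the
    // free-list LIST_ENTRY that gets linked in below.
    //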
2322
2323 //
2324 // Check if we actually did combine it with anyone
2325 //
2326 if (Combined)
2327 {
2328 //
2329         // Get the block that immediately follows our (possibly combined) free
2330         // block, so that its PreviousSize can be fixed up below
2331 //
2332 NextEntry = POOL_NEXT_BLOCK(Entry);
2333
2334 //
2335 // As long as the next block isn't on a page boundary, have it point
2336 // back to us
2337 //
2338 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2339 }
2340
2341 //
2342     // Insert this new free block at the head of its size class's free list, and release the pool lock
2343 //
2344 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2345 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2346 ExUnlockPool(PoolDesc, OldIrql);
2347 }
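//
// Illustrative usage sketch -- not part of the pool implementation and kept out
// of the build with #if 0. It shows the tag check above from the caller's side:
// freeing with a non-zero tag that does not match the allocation's tag
// bugchecks with BAD_POOL_CALLER (0x0A). The helper name, the 'lpmX' tag and
// the size are hypothetical.
//
#if 0
VOID
NTAPI
ExpTaggedFreeSketch(VOID)
{
    PVOID Buffer = ExAllocatePoolWithTag(PagedPool, 128, 'lpmX');
    if (!Buffer) return;

    //
    // Correct: the tag matches the one used at allocation time
    //
    ExFreePoolWithTag(Buffer, 'lpmX');

    //
    // Wrong: a mismatched non-zero tag would hit the KeBugCheckEx above
    //
    // ExFreePoolWithTag(Buffer, 'daB ');
}
#endif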
2348
2349 /*
2350 * @implemented
2351 */
2352 VOID
2353 NTAPI
2354 ExFreePool(PVOID P)
2355 {
2356 //
2357 // Just free without checking for the tag
2358 //
2359 ExFreePoolWithTag(P, 0);
2360 }
2361
2362 /*
2363 * @unimplemented
2364 */
2365 SIZE_T
2366 NTAPI
2367 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2368 OUT PBOOLEAN QuotaCharged)
2369 {
2370 //
2371 // Not implemented
2372 //
2373 UNIMPLEMENTED;
2374     return 0;
2375 }
2376
2377 /*
2378 * @implemented
2379 */
2381 PVOID
2382 NTAPI
2383 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2384 IN SIZE_T NumberOfBytes)
2385 {
2386 //
2387 // Allocate the pool
2388 //
2389     return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2390 }
2391
2392 /*
2393  * @unimplemented
2394 */
2395 PVOID
2396 NTAPI
2397 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2398 IN SIZE_T NumberOfBytes,
2399 IN ULONG Tag,
2400 IN EX_POOL_PRIORITY Priority)
2401 {
2402 //
2403     // The priority hint is not honored yet -- just do a plain tagged allocation
2404 //
2405 UNIMPLEMENTED;
2406 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2407 }
2408
2409 /*
2410  * @unimplemented
2411 */
2412 PVOID
2413 NTAPI
2414 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2415 IN SIZE_T NumberOfBytes,
2416 IN ULONG Tag)
2417 {
2418 //
2419     // Quota is not charged yet -- just do a plain tagged allocation
2420 //
2421 UNIMPLEMENTED;
2422 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2423 }
2424
2425 /* EOF */