1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "../ARM3/miarm.h"
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
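/*
 * Free entries in the big-page tracker table are marked by setting the low
 * bit of their Va field (big pool allocations are page-aligned, so bit 0 is
 * never set for a live entry). InitializePool seeds every Va with 1, and
 * ExpFindAndRemoveTagBigPages sets the bit again when an entry is released.
 */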
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
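/*
 * Context handed to the "Generic DPC" broadcast (KeGenericCallDpc) that
 * ExGetPoolTagInfo uses to snapshot the pool tag tracker table consistently
 * across all processors.
 */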
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
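/*
 * PoolVector[] holds the descriptor for each base pool type, the tag tracker
 * table (PoolTrackTable) and the big-page table (PoolBigPageTable) back the
 * per-tag accounting, and the remaining globals store their sizes, hash
 * masks and the locks that protect them.
 */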
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 ULONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
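/* Note: BlockSize, PreviousSize and the index given to POOL_BLOCK are all
   expressed in units of POOL_BLOCK_SIZE, not bytes */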
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
56
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
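/*
 * The list links are stored "encoded", i.e. with their lowest bit set; pool
 * blocks are POOL_BLOCK_SIZE-aligned, so a real pointer never has that bit
 * set. The helpers below set and strip the bit, and ExpCheckPoolLinks uses
 * the decoded values to validate that a list head is still consistent.
 */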
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
86
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210 Entry->PreviousSize, (char *)&Entry->PoolTag);
211 KeBugCheckEx(BAD_POOL_HEADER,
212 5,
213 (ULONG_PTR)PreviousEntry,
214 __LINE__,
215 (ULONG_PTR)Entry);
216 }
217 }
218 else if (PAGE_ALIGN(Entry) != Entry)
219 {
220 /* If there's no block before us, we are the first block, so we should be on a page boundary */
221 KeBugCheckEx(BAD_POOL_HEADER,
222 7,
223 0,
224 __LINE__,
225 (ULONG_PTR)Entry);
226 }
227
228 /* This block must have a size */
229 if (!Entry->BlockSize)
230 {
231 /* Someone must've corrupted this field */
232 if (Entry->PreviousSize)
233 {
234 PreviousEntry = POOL_PREV_BLOCK(Entry);
235 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236 (char *)&PreviousEntry->PoolTag,
237 (char *)&Entry->PoolTag);
238 }
239 else
240 {
241 DPRINT1("Entry tag %.4s\n",
242 (char *)&Entry->PoolTag);
243 }
244 KeBugCheckEx(BAD_POOL_HEADER,
245 8,
246 0,
247 __LINE__,
248 (ULONG_PTR)Entry);
249 }
250
251 /* Okay, now get the next block */
252 NextEntry = POOL_NEXT_BLOCK(Entry);
253
254 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255 if (PAGE_ALIGN(NextEntry) != NextEntry)
256 {
257 /* The two blocks must be on the same page! */
258 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259 {
260 /* Something is messed up */
261 KeBugCheckEx(BAD_POOL_HEADER,
262 9,
263 (ULONG_PTR)NextEntry,
264 __LINE__,
265 (ULONG_PTR)Entry);
266 }
267
268 /* And this block should think we are as large as we truly are */
269 if (NextEntry->PreviousSize != Entry->BlockSize)
270 {
271 /* Otherwise, someone corrupted the field */
272 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273 Entry->BlockSize, (char *)&Entry->PoolTag,
274 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275 KeBugCheckEx(BAD_POOL_HEADER,
276 5,
277 (ULONG_PTR)NextEntry,
278 __LINE__,
279 (ULONG_PTR)Entry);
280 }
281 }
282 }
283
284 VOID
285 NTAPI
286 ExpCheckPoolBlocks(IN PVOID Block)
287 {
288 BOOLEAN FoundBlock = FALSE;
289 SIZE_T Size = 0;
290 PPOOL_HEADER Entry;
291
292 /* Get the first entry for this page, make sure it really is the first */
293 Entry = PAGE_ALIGN(Block);
294 ASSERT(Entry->PreviousSize == 0);
295
296 /* Now scan each entry */
297 while (TRUE)
298 {
299 /* When we actually found our block, remember this */
300 if (Entry == Block) FoundBlock = TRUE;
301
302 /* Now validate this block header */
303 ExpCheckPoolHeader(Entry);
304
305 /* And go to the next one, keeping track of our size */
306 Size += Entry->BlockSize;
307 Entry = POOL_NEXT_BLOCK(Entry);
308
309 /* If we hit the last block, stop */
310 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
311
312 /* If we hit the end of the page, stop */
313 if (PAGE_ALIGN(Entry) == Entry) break;
314 }
315
316 /* We must've found our block, and we must have hit the end of the page */
317 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
318 {
319 /* Otherwise, the blocks are messed up */
320 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
321 }
322 }
323
324 FORCEINLINE
325 VOID
326 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
327 IN SIZE_T NumberOfBytes,
328 IN PVOID Entry)
329 {
330 //
331 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
332 // be DISPATCH_LEVEL or lower for Non Paged Pool
333 //
334 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
335 (KeGetCurrentIrql() > APC_LEVEL) :
336 (KeGetCurrentIrql() > DISPATCH_LEVEL))
337 {
338 //
339 // Take the system down
340 //
341 KeBugCheckEx(BAD_POOL_CALLER,
342 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
343 KeGetCurrentIrql(),
344 PoolType,
345 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
346 }
347 }
348
349 FORCEINLINE
350 ULONG
351 ExpComputeHashForTag(IN ULONG Tag,
352 IN SIZE_T BucketMask)
353 {
354 //
355 // Compute the hash by multiplying with a large prime number and then XORing
356 // with the HIDWORD of the result.
357 //
358 // Finally, AND with the bucket mask to generate a valid index/bucket into
359 // the table
360 //
361 ULONGLONG Result = (ULONGLONG)40543 * Tag;
362 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
363 }
364
365 FORCEINLINE
366 ULONG
367 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
368 {
369 ULONG Result;
370 //
371 // Compute the hash by converting the address into a page number, and then
372 // XORing each nibble with the next one.
373 //
374 // We do *NOT* AND with the bucket mask at this point because big table expansion
375 // might happen. Therefore, the final step of the hash must be performed
376 // while holding the expansion pushlock, and this is why we call this a
377 // "partial" hash only.
378 //
379 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
380 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
381 }
382
383 /* PRIVATE FUNCTIONS **********************************************************/
384
385 VOID
386 NTAPI
387 INIT_FUNCTION
388 ExpSeedHotTags(VOID)
389 {
390 ULONG i, Key, Hash, Index;
391 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
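//
// Note: multi-character constants are stored little-endian on the targets
// ReactOS supports, so each tag below reads reversed in the source; for
// example, 'looP' is the "Pool" tag and ' GIB' is the generic "BIG " tag.
//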
392 ULONG TagList[] =
393 {
394 '  oI',
395 ' laH',
396 'PldM',
397 'LooP',
398 'tSbO',
399 ' prI',
400 'bdDN',
401 'LprI',
402 'pOoI',
403 ' ldM',
404 'eliF',
405 'aVMC',
406 'dSeS',
407 'CFtN',
408 'looP',
409 'rPCT',
410 'bNMC',
411 'dTeS',
412 'sFtN',
413 'TPCT',
414 'CPCT',
415 ' yeK',
416 'qSbO',
417 'mNoI',
418 'aEoI',
419 'cPCT',
420 'aFtN',
421 '0ftN',
422 'tceS',
423 'SprI',
424 'ekoT',
425 '  eS',
426 'lCbO',
427 'cScC',
428 'lFtN',
429 'cAeS',
430 'mfSF',
431 'kWcC',
432 'miSF',
433 'CdfA',
434 'EdfA',
435 'orSF',
436 'nftN',
437 'PRIU',
438 'rFpN',
439 'RFpN',
440 'aPeS',
441 'sUeS',
442 'FpcA',
443 'MpcA',
444 'cSeS',
445 'mNbO',
446 'sFpN',
447 'uLeS',
448 'DPcS',
449 'nevE',
450 'vrqR',
451 'ldaV',
452 '  pP',
453 'SdaV',
454 ' daV',
455 'LdaV',
456 'FdaV',
457 ' GIB',
458 };
459
460 //
461 // Loop all 64 hot tags
462 //
463 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
464 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
465 {
466 //
467 // Get the current tag, and compute its hash in the tracker table
468 //
469 Key = TagList[i];
470 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
471
472 //
473 // Loop all the hashes in this index/bucket
474 //
475 Index = Hash;
476 while (TRUE)
477 {
478 //
479 // Find an empty entry, and make sure this isn't the last hash that
480 // can fit.
481 //
482 // On checked builds, also make sure this is the first time we are
483 // seeding this tag.
484 //
485 ASSERT(TrackTable[Hash].Key != Key);
486 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
487 {
488 //
489 // It has been seeded, move on to the next tag
490 //
491 TrackTable[Hash].Key = Key;
492 break;
493 }
494
495 //
496 // This entry was already taken, compute the next possible hash while
497 // making sure we're not back at our initial index.
498 //
499 ASSERT(TrackTable[Hash].Key != Key);
500 Hash = (Hash + 1) & PoolTrackTableMask;
501 if (Hash == Index) break;
502 }
503 }
504 }
505
506 VOID
507 NTAPI
508 ExpRemovePoolTracker(IN ULONG Key,
509 IN SIZE_T NumberOfBytes,
510 IN POOL_TYPE PoolType)
511 {
512 ULONG Hash, Index;
513 PPOOL_TRACKER_TABLE Table, TableEntry;
514 SIZE_T TableMask, TableSize;
515
516 //
517 // Remove the PROTECTED_POOL flag which is not part of the tag
518 //
519 Key &= ~PROTECTED_POOL;
520
521 //
522 // With WinDBG you can set a tag you want to break on when an allocation is
523 // attempted
524 //
525 if (Key == PoolHitTag) DbgBreakPoint();
526
527 //
528 // Why the double indirection? Because normally this function is also used
529 // when doing session pool allocations, which has another set of tables,
530 // sizes, and masks that live in session pool. Now we don't support session
531 // pool so we only ever use the regular tables, but I'm keeping the code this
532 // way so that the day we DO support session pool, it won't require that
533 // many changes
534 //
535 Table = PoolTrackTable;
536 TableMask = PoolTrackTableMask;
537 TableSize = PoolTrackTableSize;
538
539 //
540 // Compute the hash for this key, and loop all the possible buckets
541 //
542 Hash = ExpComputeHashForTag(Key, TableMask);
543 Index = Hash;
544 while (TRUE)
545 {
546 //
547 // Have we found the entry for this tag?
548 //
549 TableEntry = &Table[Hash];
550 if (TableEntry->Key == Key)
551 {
552 //
553 // Decrement the counters depending on if this was paged or nonpaged
554 // pool
555 //
556 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
557 {
558 InterlockedIncrement(&TableEntry->NonPagedFrees);
559 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
560 -(SSIZE_T)NumberOfBytes);
561 return;
562 }
563 InterlockedIncrement(&TableEntry->PagedFrees);
564 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
565 -(SSIZE_T)NumberOfBytes);
566 return;
567 }
568
569 //
570 // We should have only ended up with an empty entry if we've reached
571 // the last bucket
572 //
573 if (!TableEntry->Key)
574 {
575 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
576 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
577 ASSERT(Hash == TableMask);
578 }
579
580 //
581 // This path is hit when we don't have an entry, and the current bucket
582 // is full, so we simply try the next one
583 //
584 Hash = (Hash + 1) & TableMask;
585 if (Hash == Index) break;
586 }
587
588 //
589 // And finally this path is hit when all the buckets are full, and we need
590 // some expansion. This path is not yet supported in ReactOS and so we'll
591 // ignore the tag
592 //
593 DPRINT1("Out of pool tag space, ignoring...\n");
594 }
595
596 VOID
597 NTAPI
598 ExpInsertPoolTracker(IN ULONG Key,
599 IN SIZE_T NumberOfBytes,
600 IN POOL_TYPE PoolType)
601 {
602 ULONG Hash, Index;
603 KIRQL OldIrql;
604 PPOOL_TRACKER_TABLE Table, TableEntry;
605 SIZE_T TableMask, TableSize;
606
607 //
608 // Remove the PROTECTED_POOL flag which is not part of the tag
609 //
610 Key &= ~PROTECTED_POOL;
611
612 //
613 // With WinDBG you can set a tag you want to break on when an allocation is
614 // attempted
615 //
616 if (Key == PoolHitTag) DbgBreakPoint();
617
618 //
619 // There is also an internal flag you can set to break on malformed tags
620 //
621 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
622
623 //
624 // ASSERT on ReactOS features not yet supported
625 //
626 ASSERT(!(PoolType & SESSION_POOL_MASK));
627 ASSERT(KeGetCurrentProcessorNumber() == 0);
628
629 //
630 // Why the double indirection? Because normally this function is also used
631 // when doing session pool allocations, which has another set of tables,
632 // sizes, and masks that live in session pool. Now we don't support session
633 // pool so we only ever use the regular tables, but I'm keeping the code this
634 // way so that the day we DO support session pool, it won't require that
635 // many changes
636 //
637 Table = PoolTrackTable;
638 TableMask = PoolTrackTableMask;
639 TableSize = PoolTrackTableSize;
640
641 //
642 // Compute the hash for this key, and loop all the possible buckets
643 //
644 Hash = ExpComputeHashForTag(Key, TableMask);
645 Index = Hash;
646 while (TRUE)
647 {
648 //
649 // Do we already have an entry for this tag?
650 //
651 TableEntry = &Table[Hash];
652 if (TableEntry->Key == Key)
653 {
654 //
655 // Increment the counters depending on if this was paged or nonpaged
656 // pool
657 //
658 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
659 {
660 InterlockedIncrement(&TableEntry->NonPagedAllocs);
661 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
662 return;
663 }
664 InterlockedIncrement(&TableEntry->PagedAllocs);
665 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
666 return;
667 }
668
669 //
670 // We don't have an entry yet, but we've found a free bucket for it
671 //
672 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
673 {
674 //
675 // We need to hold the lock while creating a new entry, since other
676 // processors might be in this code path as well
677 //
678 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
679 if (!PoolTrackTable[Hash].Key)
680 {
681 //
682 // We've won the race, so now create this entry in the bucket
683 //
684 ASSERT(Table[Hash].Key == 0);
685 PoolTrackTable[Hash].Key = Key;
686 TableEntry->Key = Key;
687 }
688 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
689
690 //
691 // Now we force the loop to run again, and we should now end up in
692 // the code path above which does the interlocked increments...
693 //
694 continue;
695 }
696
697 //
698 // This path is hit when we don't have an entry, and the current bucket
699 // is full, so we simply try the next one
700 //
701 Hash = (Hash + 1) & TableMask;
702 if (Hash == Index) break;
703 }
704
705 //
706 // And finally this path is hit when all the buckets are full, and we need
707 // some expansion. This path is not yet supported in ReactOS and so we'll
708 // ignore the tag
709 //
710 DPRINT1("Out of pool tag space, ignoring...\n");
711 }
712
713 VOID
714 NTAPI
715 INIT_FUNCTION
716 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
717 IN POOL_TYPE PoolType,
718 IN ULONG PoolIndex,
719 IN ULONG Threshold,
720 IN PVOID PoolLock)
721 {
722 PLIST_ENTRY NextEntry, LastEntry;
723
724 //
725 // Setup the descriptor based on the caller's request
726 //
727 PoolDescriptor->PoolType = PoolType;
728 PoolDescriptor->PoolIndex = PoolIndex;
729 PoolDescriptor->Threshold = Threshold;
730 PoolDescriptor->LockAddress = PoolLock;
731
732 //
733 // Initialize accounting data
734 //
735 PoolDescriptor->RunningAllocs = 0;
736 PoolDescriptor->RunningDeAllocs = 0;
737 PoolDescriptor->TotalPages = 0;
738 PoolDescriptor->TotalBytes = 0;
739 PoolDescriptor->TotalBigPages = 0;
740
741 //
742 // Nothing pending for now
743 //
744 PoolDescriptor->PendingFrees = NULL;
745 PoolDescriptor->PendingFreeDepth = 0;
746
747 //
748 // Loop all the descriptor's allocation lists and initialize them
749 //
750 NextEntry = PoolDescriptor->ListHeads;
751 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
752 while (NextEntry < LastEntry)
753 {
754 ExpInitializePoolListHead(NextEntry);
755 NextEntry++;
756 }
757
758 //
759 // Note that ReactOS does not support Session Pool Yet
760 //
761 ASSERT(PoolType != PagedPoolSession);
762 }
763
764 VOID
765 NTAPI
766 INIT_FUNCTION
767 InitializePool(IN POOL_TYPE PoolType,
768 IN ULONG Threshold)
769 {
770 PPOOL_DESCRIPTOR Descriptor;
771 SIZE_T TableSize;
772 ULONG i;
773
774 //
775 // Check what kind of pool this is
776 //
777 if (PoolType == NonPagedPool)
778 {
779 //
780 // Compute the track table size and convert it from a power of two to an
781 // actual byte size
782 //
783 // NOTE: On checked builds, we'll assert if the registry table size was
784 // invalid, while on retail builds we'll just break out of the loop at
785 // that point.
786 //
787 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
788 for (i = 0; i < 32; i++)
789 {
790 if (TableSize & 1)
791 {
792 ASSERT((TableSize & ~1) == 0);
793 if (!(TableSize & ~1)) break;
794 }
795 TableSize >>= 1;
796 }
797
798 //
799 // If we hit bit 32, then no size was defined in the registry, so
800 // we'll use the default size of 2048 entries.
801 //
802 // Otherwise, use the size from the registry, as long as it's not
803 // smaller than 64 entries.
804 //
805 if (i == 32)
806 {
807 PoolTrackTableSize = 2048;
808 }
809 else
810 {
811 PoolTrackTableSize = max(1 << i, 64);
812 }
813
814 //
815 // Loop trying with the biggest specified size first, and cut it down
816 // by a power of two each iteration in case not enough memory exists
817 //
818 while (TRUE)
819 {
820 //
821 // Do not allow overflow
822 //
823 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
824 {
825 PoolTrackTableSize >>= 1;
826 continue;
827 }
828
829 //
830 // Allocate the tracker table and exit the loop if this worked
831 //
832 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
833 (PoolTrackTableSize + 1) *
834 sizeof(POOL_TRACKER_TABLE));
835 if (PoolTrackTable) break;
836
837 //
838 // Otherwise, as long as we're not down to the last bit, keep
839 // iterating
840 //
841 if (PoolTrackTableSize == 1)
842 {
843 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
844 TableSize,
845 0xFFFFFFFF,
846 0xFFFFFFFF,
847 0xFFFFFFFF);
848 }
849 PoolTrackTableSize >>= 1;
850 }
851
852 //
853 // Finally, add one entry, compute the hash, and zero the table
854 //
855 PoolTrackTableSize++;
856 PoolTrackTableMask = PoolTrackTableSize - 2;
857
858 RtlZeroMemory(PoolTrackTable,
859 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
860
861 //
862 // We now do the exact same thing with the tracker table for big pages
863 //
864 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
865 for (i = 0; i < 32; i++)
866 {
867 if (TableSize & 1)
868 {
869 ASSERT((TableSize & ~1) == 0);
870 if (!(TableSize & ~1)) break;
871 }
872 TableSize >>= 1;
873 }
874
875 //
876 // For big pages, the default tracker table is 4096 entries, while the
877 // minimum is still 64
878 //
879 if (i == 32)
880 {
881 PoolBigPageTableSize = 4096;
882 }
883 else
884 {
885 PoolBigPageTableSize = max(1 << i, 64);
886 }
887
888 //
889 // Again, run the exact same loop we ran earlier, but this time for the
890 // big pool tracker instead
891 //
892 while (TRUE)
893 {
894 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
895 {
896 PoolBigPageTableSize >>= 1;
897 continue;
898 }
899
900 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
901 PoolBigPageTableSize *
902 sizeof(POOL_TRACKER_BIG_PAGES));
903 if (PoolBigPageTable) break;
904
905 if (PoolBigPageTableSize == 1)
906 {
907 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
908 TableSize,
909 0xFFFFFFFF,
910 0xFFFFFFFF,
911 0xFFFFFFFF);
912 }
913
914 PoolBigPageTableSize >>= 1;
915 }
916
917 //
918 // An extra entry is not needed for the big pool tracker, so just
919 // compute the hash and zero it
920 //
921 PoolBigPageTableHash = PoolBigPageTableSize - 1;
922 RtlZeroMemory(PoolBigPageTable,
923 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
924 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
925
926 //
927 // During development, print this out so we can see what's happening
928 //
929 DPRINT1("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
930 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
931 DPRINT1("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
932 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
933
934 //
935 // Insert the generic tracker for all of big pool
936 //
937 ExpInsertPoolTracker('looP',
938 ROUND_TO_PAGES(PoolBigPageTableSize *
939 sizeof(POOL_TRACKER_BIG_PAGES)),
940 NonPagedPool);
941
942 //
943 // No support for NUMA systems at this time
944 //
945 ASSERT(KeNumberNodes == 1);
946
947 //
948 // Initialize the tag spinlock
949 //
950 KeInitializeSpinLock(&ExpTaggedPoolLock);
951
952 //
953 // Initialize the nonpaged pool descriptor
954 //
955 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
956 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
957 NonPagedPool,
958 0,
959 Threshold,
960 NULL);
961 }
962 else
963 {
964 //
965 // No support for NUMA systems at this time
966 //
967 ASSERT(KeNumberNodes == 1);
968
969 //
970 // Allocate the pool descriptor
971 //
972 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
973 sizeof(KGUARDED_MUTEX) +
974 sizeof(POOL_DESCRIPTOR),
975 'looP');
976 if (!Descriptor)
977 {
978 //
979 // This is really bad...
980 //
981 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
982 0,
983 -1,
984 -1,
985 -1);
986 }
987
988 //
989 // Setup the vector and guarded mutex for paged pool
990 //
991 PoolVector[PagedPool] = Descriptor;
992 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
993 ExpPagedPoolDescriptor[0] = Descriptor;
994 KeInitializeGuardedMutex(ExpPagedPoolMutex);
995 ExInitializePoolDescriptor(Descriptor,
996 PagedPool,
997 0,
998 Threshold,
999 ExpPagedPoolMutex);
1000
1001 //
1002 // Insert the generic tracker for all of nonpaged pool
1003 //
1004 ExpInsertPoolTracker('looP',
1005 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1006 NonPagedPool);
1007 }
1008 }
1009
1010 FORCEINLINE
1011 KIRQL
1012 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1013 {
1014 //
1015 // Check if this is nonpaged pool
1016 //
1017 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1018 {
1019 //
1020 // Use the queued spin lock
1021 //
1022 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1023 }
1024 else
1025 {
1026 //
1027 // Use the guarded mutex
1028 //
1029 KeAcquireGuardedMutex(Descriptor->LockAddress);
1030 return APC_LEVEL;
1031 }
1032 }
1033
1034 FORCEINLINE
1035 VOID
1036 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1037 IN KIRQL OldIrql)
1038 {
1039 //
1040 // Check if this is nonpaged pool
1041 //
1042 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1043 {
1044 //
1045 // Use the queued spin lock
1046 //
1047 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1048 }
1049 else
1050 {
1051 //
1052 // Use the guarded mutex
1053 //
1054 KeReleaseGuardedMutex(Descriptor->LockAddress);
1055 }
1056 }
1057
1058 VOID
1059 NTAPI
1060 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1061 IN PVOID DeferredContext,
1062 IN PVOID SystemArgument1,
1063 IN PVOID SystemArgument2)
1064 {
1065 PPOOL_DPC_CONTEXT Context = DeferredContext;
1066 UNREFERENCED_PARAMETER(Dpc);
1067 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1068
1069 //
1070 // Make sure we win the race, and if we did, copy the data atomically
1071 //
1072 if (KeSignalCallDpcSynchronize(SystemArgument2))
1073 {
1074 RtlCopyMemory(Context->PoolTrackTable,
1075 PoolTrackTable,
1076 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1077
1078 //
1079 // This is here because ReactOS does not yet support expansion
1080 //
1081 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1082 }
1083
1084 //
1085 // Regardless of whether we won or not, we must now synchronize and then
1086 // decrement the barrier since this is one more processor that has completed
1087 // the callback.
1088 //
1089 KeSignalCallDpcSynchronize(SystemArgument2);
1090 KeSignalCallDpcDone(SystemArgument1);
1091 }
1092
1093 NTSTATUS
1094 NTAPI
1095 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1096 IN ULONG SystemInformationLength,
1097 IN OUT PULONG ReturnLength OPTIONAL)
1098 {
1099 ULONG TableSize, CurrentLength;
1100 ULONG EntryCount;
1101 NTSTATUS Status = STATUS_SUCCESS;
1102 PSYSTEM_POOLTAG TagEntry;
1103 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1104 POOL_DPC_CONTEXT Context;
1105 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1106
1107 //
1108 // Keep track of how much data the caller's buffer must hold
1109 //
1110 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1111
1112 //
1113 // Initialize the caller's buffer
1114 //
1115 TagEntry = &SystemInformation->TagInfo[0];
1116 SystemInformation->Count = 0;
1117
1118 //
1119 // Capture the number of entries, and the total size needed to make a copy
1120 // of the table
1121 //
1122 EntryCount = (ULONG)PoolTrackTableSize;
1123 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1124
1125 //
1126 // Allocate the "Generic DPC" temporary buffer
1127 //
1128 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1129 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1130
1131 //
1132 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1133 //
1134 Context.PoolTrackTable = Buffer;
1135 Context.PoolTrackTableSize = PoolTrackTableSize;
1136 Context.PoolTrackTableExpansion = NULL;
1137 Context.PoolTrackTableSizeExpansion = 0;
1138 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1139
1140 //
1141 // Now parse the results
1142 //
1143 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1144 {
1145 //
1146 // If the entry is empty, skip it
1147 //
1148 if (!TrackerEntry->Key) continue;
1149
1150 //
1151 // Otherwise, add one more entry to the caller's buffer, and ensure that
1152 // enough space has been allocated in it
1153 //
1154 SystemInformation->Count++;
1155 CurrentLength += sizeof(*TagEntry);
1156 if (SystemInformationLength < CurrentLength)
1157 {
1158 //
1159 // The caller's buffer is too small, so set a failure code. The
1160 // caller will know the count, as well as how much space is needed.
1161 //
1162 // We do NOT break out of the loop, because we want to keep incrementing
1163 // the Count as well as CurrentLength so that the caller can know the
1164 // final numbers
1165 //
1166 Status = STATUS_INFO_LENGTH_MISMATCH;
1167 }
1168 else
1169 {
1170 //
1171 // Small sanity check that our accounting is working correctly
1172 //
1173 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1174 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1175
1176 //
1177 // Return the data into the caller's buffer
1178 //
1179 TagEntry->TagUlong = TrackerEntry->Key;
1180 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1181 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1182 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1183 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1184 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1185 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1186 TagEntry++;
1187 }
1188 }
1189
1190 //
1191 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1192 //
1193 ExFreePoolWithTag(Buffer, 'ofnI');
1194 if (ReturnLength) *ReturnLength = CurrentLength;
1195 return Status;
1196 }
1197
1198 BOOLEAN
1199 NTAPI
1200 ExpAddTagForBigPages(IN PVOID Va,
1201 IN ULONG Key,
1202 IN ULONG NumberOfPages,
1203 IN POOL_TYPE PoolType)
1204 {
1205 ULONG Hash, i = 0;
1206 PVOID OldVa;
1207 KIRQL OldIrql;
1208 SIZE_T TableSize;
1209 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1210 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1211 ASSERT(!(PoolType & SESSION_POOL_MASK));
1212
1213 //
1214 // As the table is expandable, these values must only be read after acquiring
1215 // the lock to avoid a torn access during an expansion
1216 //
1217 Hash = ExpComputePartialHashForAddress(Va);
1218 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1219 Hash &= PoolBigPageTableHash;
1220 TableSize = PoolBigPageTableSize;
1221
1222 //
1223 // We loop from the current hash bucket to the end of the table, and then
1224 // rollover to hash bucket 0 and keep going from there. If we return back
1225 // to the beginning, then we attempt expansion at the bottom of the loop
1226 //
1227 EntryStart = Entry = &PoolBigPageTable[Hash];
1228 EntryEnd = &PoolBigPageTable[TableSize];
1229 do
1230 {
1231 //
1232 // Make sure that this is a free entry and attempt to atomically make the
1233 // entry busy now
1234 //
1235 OldVa = Entry->Va;
1236 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1237 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1238 {
1239 //
1240 // We now own this entry, write down the size and the pool tag
1241 //
1242 Entry->Key = Key;
1243 Entry->NumberOfPages = NumberOfPages;
1244
1245 //
1246 // Add one more entry to the count, and see if we're now using more than
1247 // 25% of the table, at which point we'll do an expansion now
1248 // to avoid blocking too hard later on.
1249 //
1250 // Note that we only do this if it's also been the 16th time that we
1251 // keep losing the race or that we are not finding a free entry anymore,
1252 // which implies a massive number of concurrent big pool allocations.
1253 //
1254 InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1255 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1256 {
1257 DPRINT1("Should attempt expansion since we now have %lu entries\n",
1258 ExpPoolBigEntriesInUse);
1259 }
1260
1261 //
1262 // We have our entry, return
1263 //
1264 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1265 return TRUE;
1266 }
1267
1268 //
1269 // We don't have our entry yet, so keep trying, making the entry list
1270 // circular if we reach the last entry. We'll eventually break out of
1271 // the loop once we've rolled over and returned back to our original
1272 // hash bucket
1273 //
1274 i++;
1275 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1276 } while (Entry != EntryStart);
1277
1278 //
1279 // This means there's no free hash buckets whatsoever, so we would now have
1280 // to attempt expanding the table
1281 //
1282 DPRINT1("Big pool expansion needed, not implemented!\n");
1283 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1284 return FALSE;
1285 }
1286
1287 ULONG
1288 NTAPI
1289 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1290 OUT PULONG_PTR BigPages,
1291 IN POOL_TYPE PoolType)
1292 {
1293 BOOLEAN FirstTry = TRUE;
1294 SIZE_T TableSize;
1295 KIRQL OldIrql;
1296 ULONG PoolTag, Hash;
1297 PPOOL_TRACKER_BIG_PAGES Entry;
1298 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1299 ASSERT(!(PoolType & SESSION_POOL_MASK));
1300
1301 //
1302 // As the table is expandable, these values must only be read after acquiring
1303 // the lock to avoid a torn access during an expansion
1304 //
1305 Hash = ExpComputePartialHashForAddress(Va);
1306 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1307 Hash &= PoolBigPageTableHash;
1308 TableSize = PoolBigPageTableSize;
1309
1310 //
1311 // Loop while trying to find this big page allocation
1312 //
1313 while (PoolBigPageTable[Hash].Va != Va)
1314 {
1315 //
1316 // Increment the size until we go past the end of the table
1317 //
1318 if (++Hash >= TableSize)
1319 {
1320 //
1321 // Is this the second time we've tried?
1322 //
1323 if (!FirstTry)
1324 {
1325 //
1326 // This means it was never inserted into the pool table and it
1327 // received the special "BIG" tag -- return that and return 0
1328 // so that the code can ask Mm for the page count instead
1329 //
1330 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1331 *BigPages = 0;
1332 return ' GIB';
1333 }
1334
1335 //
1336 // The first time this happens, reset the hash index and try again
1337 //
1338 Hash = 0;
1339 FirstTry = FALSE;
1340 }
1341 }
1342
1343 //
1344 // Now capture all the information we need from the entry, since after we
1345 // release the lock, the data can change
1346 //
1347 Entry = &PoolBigPageTable[Hash];
1348 *BigPages = Entry->NumberOfPages;
1349 PoolTag = Entry->Key;
1350
1351 //
1352 // Set the free bit, and decrement the number of allocations. Finally, release
1353 // the lock and return the tag that was located
1354 //
1355 InterlockedIncrement((PLONG)&Entry->Va);
1356 InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1357 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1358 return PoolTag;
1359 }
1360
1361 VOID
1362 NTAPI
1363 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1364 OUT PULONG NonPagedPoolPages,
1365 OUT PULONG PagedPoolAllocs,
1366 OUT PULONG PagedPoolFrees,
1367 OUT PULONG PagedPoolLookasideHits,
1368 OUT PULONG NonPagedPoolAllocs,
1369 OUT PULONG NonPagedPoolFrees,
1370 OUT PULONG NonPagedPoolLookasideHits)
1371 {
1372 ULONG i;
1373 PPOOL_DESCRIPTOR PoolDesc;
1374
1375 //
1376 // Assume all failures
1377 //
1378 *PagedPoolPages = 0;
1379 *PagedPoolAllocs = 0;
1380 *PagedPoolFrees = 0;
1381
1382 //
1383 // Tally up the totals for all the paged pools
1384 //
1385 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1386 {
1387 PoolDesc = ExpPagedPoolDescriptor[i];
1388 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1389 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1390 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1391 }
1392
1393 //
1394 // The first non-paged pool has a hardcoded well-known descriptor name
1395 //
1396 PoolDesc = &NonPagedPoolDescriptor;
1397 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1398 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1399 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1400
1401 //
1402 // If the system has more than one non-paged pool, copy the other descriptor
1403 // totals as well
1404 //
1405 #if 0
1406 if (ExpNumberOfNonPagedPools > 1)
1407 {
1408 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1409 {
1410 PoolDesc = ExpNonPagedPoolDescriptor[i];
1411 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1412 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1413 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1414 }
1415 }
1416 #endif
1417
1418 //
1419 // FIXME: Not yet supported
1420 //
1421 *NonPagedPoolLookasideHits += 0;
1422 *PagedPoolLookasideHits += 0;
1423 }
1424
1425 /* PUBLIC FUNCTIONS ***********************************************************/
1426
1427 /*
1428 * @implemented
1429 */
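/*
 * Allocation strategy: requests larger than POOL_MAX_ALLOC go straight to
 * MiAllocatePoolPages and are tracked in the big-page table; smaller requests
 * try the per-processor and then the global lookaside lists, then the
 * descriptor's free lists, and finally fall back to carving up a fresh page.
 */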
1430 PVOID
1431 NTAPI
1432 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1433 IN SIZE_T NumberOfBytes,
1434 IN ULONG Tag)
1435 {
1436 PPOOL_DESCRIPTOR PoolDesc;
1437 PLIST_ENTRY ListHead;
1438 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1439 KIRQL OldIrql;
1440 USHORT BlockSize, i;
1441 ULONG OriginalType;
1442 PKPRCB Prcb = KeGetCurrentPrcb();
1443 PGENERAL_LOOKASIDE LookasideList;
1444
1445 //
1446 // Some sanity checks
1447 //
1448 ASSERT(Tag != 0);
1449 ASSERT(Tag != ' GIB');
1450 ASSERT(NumberOfBytes != 0);
1451 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1452
1453 //
1454 // Not supported in ReactOS
1455 //
1456 ASSERT(!(PoolType & SESSION_POOL_MASK));
1457
1458 //
1459 // Check if verifier or special pool is enabled
1460 //
1461 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1462 {
1463 //
1464 // For verifier, we should call the verification routine
1465 //
1466 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1467 {
1468 DPRINT1("Driver Verifier is not yet supported\n");
1469 }
1470
1471 //
1472 // For special pool, we check if this is a suitable allocation and do
1473 // the special allocation if needed
1474 //
1475 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1476 {
1477 //
1478 // Check if this is a special pool allocation
1479 //
1480 if (MmUseSpecialPool(NumberOfBytes, Tag))
1481 {
1482 //
1483 // Try to allocate using special pool
1484 //
1485 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1486 if (Entry) return Entry;
1487 }
1488 }
1489 }
1490
1491 //
1492 // Get the pool type and its corresponding vector for this request
1493 //
1494 OriginalType = PoolType;
1495 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1496 PoolDesc = PoolVector[PoolType];
1497 ASSERT(PoolDesc != NULL);
1498
1499 //
1500 // Check if this is a big page allocation
1501 //
1502 if (NumberOfBytes > POOL_MAX_ALLOC)
1503 {
1504 //
1505 // Allocate pages for it
1506 //
1507 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1508 if (!Entry)
1509 {
1510 //
1511 // Must succeed pool is deprecated, but still supported. These allocation
1512 // failures must cause an immediate bugcheck
1513 //
1514 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1515 {
1516 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1517 NumberOfBytes,
1518 NonPagedPoolDescriptor.TotalPages,
1519 NonPagedPoolDescriptor.TotalBigPages,
1520 0);
1521 }
1522
1523 //
1524 // Internal debugging
1525 //
1526 ExPoolFailures++;
1527
1528 //
1529 // This flag requests printing failures, and can also further specify
1530 // breaking on failures
1531 //
1532 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1533 {
1534 DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
1535 NumberOfBytes,
1536 OriginalType);
1537 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1538 }
1539
1540 //
1541 // Finally, this flag requests an exception, which we are more than
1542 // happy to raise!
1543 //
1544 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1545 {
1546 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1547 }
1548 }
1549
1550 //
1551 // Increment required counters
1552 //
1553 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1554 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1555 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1556 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1557
1558 //
1559 // Add a tag for the big page allocation and switch to the generic "BIG"
1560 // tag if we failed to do so, then insert a tracker for this allocation.
1561 //
1562 if (!ExpAddTagForBigPages(Entry,
1563 Tag,
1564 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1565 OriginalType))
1566 {
1567 Tag = ' GIB';
1568 }
1569 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1570 return Entry;
1571 }
1572
1573 //
1574 // Should never request 0 bytes from the pool, but since so many drivers do
1575 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1576 //
1577 if (!NumberOfBytes) NumberOfBytes = 1;
1578
1579 //
1580 // A pool allocation is defined by its data, a linked list to connect it to
1581 // the free list (if necessary), and a pool header to store accounting info.
1582 // Calculate this size, then convert it into a block size (units of pool
1583 // headers)
1584 //
1585 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1586 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1587 // the direct allocation of pages.
1588 //
1589 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1590 / POOL_BLOCK_SIZE);
1591 ASSERT(i < POOL_LISTS_PER_PAGE);
1592
1593 //
1594 // Handle lookaside list optimization for both paged and nonpaged pool
1595 //
1596 if (i <= MAXIMUM_PROCESSORS)
1597 {
1598 //
1599 // Try popping it from the per-CPU lookaside list
1600 //
1601 LookasideList = (PoolType == PagedPool) ?
1602 Prcb->PPPagedLookasideList[i - 1].P :
1603 Prcb->PPNPagedLookasideList[i - 1].P;
1604 LookasideList->TotalAllocates++;
1605 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1606 if (!Entry)
1607 {
1608 //
1609 // We failed, try popping it from the global list
1610 //
1611 LookasideList = (PoolType == PagedPool) ?
1612 Prcb->PPPagedLookasideList[i - 1].L :
1613 Prcb->PPNPagedLookasideList[i - 1].L;
1614 LookasideList->TotalAllocates++;
1615 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1616 }
1617
1618 //
1619 // If we were able to pop it, update the accounting and return the block
1620 //
1621 if (Entry)
1622 {
1623 LookasideList->AllocateHits++;
1624
1625 //
1626 // Get the real entry, write down its pool type, and track it
1627 //
1628 Entry--;
1629 Entry->PoolType = PoolType + 1;
1630 ExpInsertPoolTracker(Tag,
1631 Entry->BlockSize * POOL_BLOCK_SIZE,
1632 OriginalType);
1633
1634 //
1635 // Return the pool allocation
1636 //
1637 Entry->PoolTag = Tag;
1638 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1639 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1640 return POOL_FREE_BLOCK(Entry);
1641 }
1642 }
1643
1644 //
1645 // Loop in the free lists looking for a block of this size. Start with the
1646 // list optimized for this kind of size lookup
1647 //
1648 ListHead = &PoolDesc->ListHeads[i];
1649 do
1650 {
1651 //
1652 // Are there any free entries available on this list?
1653 //
1654 if (!ExpIsPoolListEmpty(ListHead))
1655 {
1656 //
1657 // Acquire the pool lock now
1658 //
1659 OldIrql = ExLockPool(PoolDesc);
1660
1661 //
1662 // And make sure the list still has entries
1663 //
1664 if (ExpIsPoolListEmpty(ListHead))
1665 {
1666 //
1667 // Someone raced us (and won) before we had a chance to acquire
1668 // the lock.
1669 //
1670 // Try again!
1671 //
1672 ExUnlockPool(PoolDesc, OldIrql);
1673 continue;
1674 }
1675
1676 //
1677 // Remove a free entry from the list
1678 // Note that due to the way we insert free blocks into multiple lists
1679 // there is a guarantee that any block on this list will either be
1680 // of the correct size, or perhaps larger.
1681 //
1682 ExpCheckPoolLinks(ListHead);
1683 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1684 ExpCheckPoolLinks(ListHead);
1685 ExpCheckPoolBlocks(Entry);
1686 ASSERT(Entry->BlockSize >= i);
1687 ASSERT(Entry->PoolType == 0);
1688
1689 //
1690 // Check if this block is larger than what we need. The block could
1691 // not possibly be smaller, due to the reason explained above (and
1692 // we would've asserted on a checked build if this was the case).
1693 //
1694 if (Entry->BlockSize != i)
1695 {
1696 //
1697 // Is there an entry before this one?
1698 //
1699 if (Entry->PreviousSize == 0)
1700 {
1701 //
1702 // There isn't anyone before us, so take the next block and
1703 // turn it into a fragment that contains the leftover data
1704 // that we don't need to satisfy the caller's request
1705 //
1706 FragmentEntry = POOL_BLOCK(Entry, i);
1707 FragmentEntry->BlockSize = Entry->BlockSize - i;
1708
1709 //
1710 // And make it point back to us
1711 //
1712 FragmentEntry->PreviousSize = i;
1713
1714 //
1715 // Now get the block that follows the new fragment and check
1716 // if it's still on the same page as us (and not at the end)
1717 //
1718 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
1719 if (PAGE_ALIGN(NextEntry) != NextEntry)
1720 {
1721 //
1722 // Adjust this next block to point to our newly created
1723 // fragment block
1724 //
1725 NextEntry->PreviousSize = FragmentEntry->BlockSize;
1726 }
1727 }
1728 else
1729 {
1730 //
1731 // There is a free entry before us, which we know is smaller
1732 // so we'll make this entry the fragment instead
1733 //
1734 FragmentEntry = Entry;
1735
1736 //
1737 // And then we'll remove from it the actual size required.
1738 // Now the entry is a leftover free fragment
1739 //
1740 Entry->BlockSize -= i;
1741
1742 //
1743 // Now let's go to the next entry after the fragment (which
1744 // used to point to our original free entry) and make it
1745 // reference the new fragment entry instead.
1746 //
1747 // This is the entry that will actually end up holding the
1748 // allocation!
1749 //
1750 Entry = POOL_NEXT_BLOCK(Entry);
1751 Entry->PreviousSize = FragmentEntry->BlockSize;
1752
1753 //
1754 // And now let's go to the entry after that one and check if
1755 // it's still on the same page, and not at the end
1756 //
1757 NextEntry = POOL_BLOCK(Entry, i);
1758 if (PAGE_ALIGN(NextEntry) != NextEntry)
1759 {
1760 //
1761 // Make it reference the allocation entry
1762 //
1763 NextEntry->PreviousSize = i;
1764 }
1765 }
1766
1767 //
1768 // Now our (allocation) entry is the right size
1769 //
1770 Entry->BlockSize = i;
1771
1772 //
1773 // And the next entry is now the free fragment which contains
1774 // the remaining difference between how big the original entry
1775 // was, and the actual size the caller needs/requested.
1776 //
1777 FragmentEntry->PoolType = 0;
1778 BlockSize = FragmentEntry->BlockSize;
1779
1780 //
1781 // Now check if enough free bytes remained for us to have a
1782 // "full" entry, which contains enough bytes for a linked list
1783 // and thus can be used for allocations (up to 8 bytes...)
1784 //
1785 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
1786 if (BlockSize != 1)
1787 {
1788 //
1789 // Insert the free entry into the free list for this size
1790 //
1791 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
1792 POOL_FREE_BLOCK(FragmentEntry));
1793 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
1794 }
1795 }
1796
1797 //
1798 // We have found an entry for this allocation, so set the pool type
1799 // and release the lock since we're done
1800 //
1801 Entry->PoolType = PoolType + 1;
1802 ExpCheckPoolBlocks(Entry);
1803 ExUnlockPool(PoolDesc, OldIrql);
1804
1805 //
1806 // Increment required counters
1807 //
1808 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1809 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1810
1811 //
1812 // Track this allocation
1813 //
1814 ExpInsertPoolTracker(Tag,
1815 Entry->BlockSize * POOL_BLOCK_SIZE,
1816 OriginalType);
1817
1818 //
1819 // Return the pool allocation
1820 //
1821 Entry->PoolTag = Tag;
1822 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1823 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1824 return POOL_FREE_BLOCK(Entry);
1825 }
1826 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
1827
1828 //
1829 // There were no free entries left, so we have to allocate a new fresh page
1830 //
1831 Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
1832 if (!Entry)
1833 {
1834 //
1835 // Must succeed pool is deprecated, but still supported. These allocation
1836 // failures must cause an immediate bugcheck
1837 //
1838 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1839 {
1840 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1841 PAGE_SIZE,
1842 NonPagedPoolDescriptor.TotalPages,
1843 NonPagedPoolDescriptor.TotalBigPages,
1844 0);
1845 }
1846
1847 //
1848 // Internal debugging
1849 //
1850 ExPoolFailures++;
1851
1852 //
1853 // This flag requests printing failures, and can also further specify
1854 // breaking on failures
1855 //
1856 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1857 {
1858 DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
1859 NumberOfBytes,
1860 OriginalType);
1861 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1862 }
1863
1864 //
1865 // Finally, this flag requests an exception, which we are more than
1866 // happy to raise!
1867 //
1868 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1869 {
1870 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1871 }
1872
1873 //
1874 // Return NULL to the caller in all other cases
1875 //
1876 return NULL;
1877 }
1878
1879 //
1880 // Setup the entry data
1881 //
1882 Entry->Ulong1 = 0;
1883 Entry->BlockSize = i;
1884 Entry->PoolType = PoolType + 1;
1885
1886 //
1887 // This page will have two entries -- one for the allocation (which we just
1888 // created above), and one for the remaining free bytes, which we're about
1889 // to create now. The free bytes are the whole page minus what was allocated
1890 // and then converted into units of block headers.
1891 //
1892 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
1893 FragmentEntry = POOL_BLOCK(Entry, i);
1894 FragmentEntry->Ulong1 = 0;
1895 FragmentEntry->BlockSize = BlockSize;
1896 FragmentEntry->PreviousSize = i;
1897
1898 //
1899 // Increment required counters
1900 //
1901 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
1902 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1903
1904 //
1905 // Now check if enough free bytes remained for us to have a "full" entry,
1906 // which contains enough bytes for a linked list and thus can be used for
1907 // allocations (up to 8 bytes...)
1908 //
1909 if (FragmentEntry->BlockSize != 1)
1910 {
1911 //
1912 // Excellent -- acquire the pool lock
1913 //
1914 OldIrql = ExLockPool(PoolDesc);
1915
1916 //
1917 // And insert the free entry into the free list for this block size
1918 //
1919 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
1920 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
1921 POOL_FREE_BLOCK(FragmentEntry));
1922 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
1923
1924 //
1925 // Release the pool lock
1926 //
1927 ExpCheckPoolBlocks(Entry);
1928 ExUnlockPool(PoolDesc, OldIrql);
1929 }
1930 else
1931 {
1932 //
1933 // Simply do a sanity check
1934 //
1935 ExpCheckPoolBlocks(Entry);
1936 }
1937
1938 //
1939 // Increment performance counters and track this allocation
1940 //
1941 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1942 ExpInsertPoolTracker(Tag,
1943 Entry->BlockSize * POOL_BLOCK_SIZE,
1944 PoolType);
1945
1946 //
1947 // And return the pool allocation
1948 //
1949 ExpCheckPoolBlocks(Entry);
1950 Entry->PoolTag = Tag;
1951 return POOL_FREE_BLOCK(Entry);
1952 }
1953
1954 /*
1955 * @implemented
1956 */
1957 PVOID
1958 NTAPI
1959 ExAllocatePool(POOL_TYPE PoolType,
1960 SIZE_T NumberOfBytes)
1961 {
1962 //
1963 // Use a default tag of "None"
1964 //
1965 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, TAG_NONE);
1966 }
1967
1968 /*
1969 * @implemented
1970 */
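/*
 * Free strategy: page-aligned pointers are big-page allocations, so their
 * tag tracker entry is removed and the pages go back to MiFreePoolPages;
 * smaller blocks have their tracker entry removed and, when small enough,
 * are pushed back onto a per-processor lookaside list instead of the free
 * lists.
 */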
1971 VOID
1972 NTAPI
1973 ExFreePoolWithTag(IN PVOID P,
1974 IN ULONG TagToFree)
1975 {
1976 PPOOL_HEADER Entry, NextEntry;
1977 USHORT BlockSize;
1978 KIRQL OldIrql;
1979 POOL_TYPE PoolType;
1980 PPOOL_DESCRIPTOR PoolDesc;
1981 ULONG Tag;
1982 BOOLEAN Combined = FALSE;
1983 PFN_NUMBER PageCount, RealPageCount;
1984 PKPRCB Prcb = KeGetCurrentPrcb();
1985 PGENERAL_LOOKASIDE LookasideList;
1986
1987 //
1988 // Check if any of the debug flags are enabled
1989 //
1990 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
1991 POOL_FLAG_CHECK_WORKERS |
1992 POOL_FLAG_CHECK_RESOURCES |
1993 POOL_FLAG_VERIFIER |
1994 POOL_FLAG_CHECK_DEADLOCK |
1995 POOL_FLAG_SPECIAL_POOL))
1996 {
1997 //
1998 // Check if special pool is enabled
1999 //
2000 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2001 {
2002 //
2003 // Check if it was allocated from a special pool
2004 //
2005 if (MmIsSpecialPoolAddress(P))
2006 {
2007 //
2008 // Was deadlock verification also enabled? We can do some extra
2009 // checks at this point
2010 //
2011 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2012 {
2013 DPRINT1("Verifier not yet supported\n");
2014 }
2015
2016 //
2017 // It is, so handle it via special pool free routine
2018 //
2019 MmFreeSpecialPool(P);
2020 return;
2021 }
2022 }
2023
2024 //
2025 // For non-big page allocations, we'll do a bunch of checks in here
2026 //
2027 if (PAGE_ALIGN(P) != P)
2028 {
2029 //
2030 // Get the entry for this pool allocation
2031 // The pointer math here may look wrong or confusing, but it is quite right
2032 //
2033 Entry = P;
2034 Entry--;
2035
2036 //
2037 // Get the pool type
2038 //
2039 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2040
2041 //
2042 // FIXME: Many other debugging checks go here
2043 //
2044 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2045 }
2046 }

    //
    // Check if this is a big page allocation
    //
    if (PAGE_ALIGN(P) == P)
    {
        //
        // We need to find the tag for it, so first we need to find out what
        // kind of allocation this was (paged or nonpaged), then we can go
        // ahead and try finding the tag for it. Remember to get rid of the
        // PROTECTED_POOL tag if it's found.
        //
        // Note that if, at insertion time, we failed to add the tag for a big
        // pool allocation, we used a special tag called 'BIG' to identify the
        // allocation, and we may get this tag back. In this scenario, we must
        // manually get the size of the allocation by actually counting through
        // the PFN database.
        //
        PoolType = MmDeterminePoolType(P);
        ExpCheckPoolIrqlLevel(PoolType, 0, P);
        Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
        if (!Tag)
        {
            DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
            ASSERT(Tag == ' GIB');
            PageCount = 1; // Fall back to a single page; this may make the accounting inaccurate
        }
        else if (Tag & PROTECTED_POOL)
        {
            Tag &= ~PROTECTED_POOL;
        }
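
        //
        // Note: PROTECTED_POOL is a flag bit carried in the stored tag rather
        // than part of the four-character tag value itself, which is why it is
        // stripped before the tag comparison and the tracker lookup here (the
        // small-block path below does the same).
        //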

        //
        // Check block tag
        //
        if (TagToFree && TagToFree != Tag)
        {
            DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
        }

        //
        // We have our tag and our page count, so we can go ahead and remove this
        // tracker now
        //
        ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);

        //
        // Check if any of the debug flags are enabled
        //
        if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                            POOL_FLAG_CHECK_WORKERS |
                            POOL_FLAG_CHECK_RESOURCES |
                            POOL_FLAG_CHECK_DEADLOCK))
        {
            //
            // Was deadlock verification also enabled? We can do some extra
            // checks at this point
            //
            if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
            {
                DPRINT1("Verifier not yet supported\n");
            }

            //
            // FIXME: Many debugging checks go here
            //
        }

        //
        // Update counters
        //
        PoolDesc = PoolVector[PoolType];
        InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
                                    -(LONG_PTR)(PageCount << PAGE_SHIFT));

        //
        // Do the real free now and update the last counter with the big page count
        //
        RealPageCount = MiFreePoolPages(P);
        ASSERT(RealPageCount == PageCount);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               -(LONG)RealPageCount);
        return;
    }
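
    //
    // For reference, the byte adjustments in the big-page path above are
    // derived from the page count: PageCount << PAGE_SHIFT converts pages to
    // bytes, so a 3-page allocation on a system with 4 KB pages accounts for
    // 3 << 12 = 12288 bytes in the tracker and in TotalBytes.
    //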

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Make sure that the IRQL makes sense
    //
    ExpCheckPoolIrqlLevel(PoolType, 0, P);

    //
    // Get the pool tag and get rid of the PROTECTED_POOL flag
    //
    Tag = Entry->PoolTag;
    if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;

    //
    // Check block tag
    //
    if (TagToFree && TagToFree != Tag)
    {
        DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
        KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
    }

    //
    // Track the removal of this allocation
    //
    ExpRemovePoolTracker(Tag,
                         BlockSize * POOL_BLOCK_SIZE,
                         Entry->PoolType - 1);

    //
    // Is this allocation small enough to have come from a lookaside list?
    //
    if (BlockSize <= MAXIMUM_PROCESSORS)
    {
        //
        // Try pushing it into the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].P :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].P;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }

        //
        // That failed, so try to push it into the global lookaside list instead
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].L :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].L;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }
    }
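
    //
    // A note on the two attempts above: each PRCB lookaside slot carries two
    // list pointers, the per-processor list tried first (the .P member) and a
    // shared, global fallback list (the .L member). A block is only pushed if
    // the target list is below its Depth limit; otherwise it falls through to
    // the regular free-list handling below. Because both attempts happen
    // before ExLockPool is called, frequent frees of small blocks can avoid
    // taking the pool descriptor lock entirely.
    //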

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Update performance counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block) into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }
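
    //
    // To illustrate the two passes above: if the page looked like
    //
    //     | A (free) | B (being freed) | C (free) | ...
    //
    // the first pass folds C into B (Entry->BlockSize grows), and the second
    // pass folds the enlarged B into A and switches Entry to point at A, so a
    // single free block now covers A+B+C. Each absorbed block is unlinked from
    // its size-indexed free list first, when it is large enough to be on one.
    //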

    //
    // By now, the combined blocks may cover an entire page (if there were
    // only 2-3 allocations on the page, they could all have been merged).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, update the performance counter,
        // and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);
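    //
    // (A single-unit block is just a header with no room for the LIST_ENTRY
    // links, so a free block of that size should never reach this point.)
    //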

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the block that follows our free block (which, after the merges
        // above, may include what used to be the next and/or previous blocks)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}
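
//
// Since ExFreePool passes a TagToFree of zero, the tag comparison in
// ExFreePoolWithTag is skipped entirely; only callers that supply a non-zero
// tag get the BAD_POOL_CALLER tag verification above.
//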

/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return 0;
}
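
//
// A minimal sketch of what a small-block implementation of the routine above
// could look like, assuming the usable size is the block size in bytes minus
// the pool header; big-page and special-pool allocations, as well as real
// quota reporting, would need separate handling and are not covered here:
//
//     PPOOL_HEADER Entry = (PPOOL_HEADER)PoolBlock - 1;
//     *QuotaCharged = FALSE;
//     return (SIZE_T)(Entry->BlockSize * POOL_BLOCK_SIZE) - sizeof(POOL_HEADER);
//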

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // The priority hint is not honored yet; fall back to a plain tagged
    // allocation
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    //
    // Quota is not charged to the process yet; fall back to a plain tagged
    // allocation
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/* EOF */