/*
 * PROJECT: ReactOS Kernel
 * LICENSE: BSD - See COPYING.ARM in the top level directory
 * FILE: ntoskrnl/mm/ARM3/expool.c
 * PURPOSE: ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS: ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

#define POOL_BIG_TABLE_ENTRY_FREE 0x1

typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;
    SIZE_T PoolTrackTableSize;
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
    SIZE_T PoolTrackTableSizeExpansion;
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PKGUARDED_MUTEX ExpPagedPoolMutex;
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
PPOOL_TRACKER_TABLE PoolTrackTable;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
KSPIN_LOCK ExpTaggedPoolLock;
ULONG PoolHitTag;
BOOLEAN ExStopBadTags;
KSPIN_LOCK ExpLargePoolTableLock;
ULONG ExpPoolBigEntriesInUse;
ULONG ExpPoolFlags;
ULONG ExPoolFailures;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)      (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)   (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))

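//
// Worked example (illustrative, assuming the usual 8-byte POOL_HEADER and
// POOL_BLOCK_SIZE of 8 on 32-bit builds): for a header at 0x80001000 with
// BlockSize == 4, POOL_NEXT_BLOCK yields 0x80001000 + 4 * 8 == 0x80001020,
// and POOL_ENTRY(0x80001028) recovers that same header at 0x80001020. Note
// that BlockSize and PreviousSize are expressed in POOL_BLOCK_SIZE units,
// not in bytes.
//
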
/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}

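//
// Encoding sketch (illustrative): pool list entries are at least 8-byte
// aligned, so bit 0 of a LIST_ENTRY pointer is always clear and can be
// borrowed as a marker. Encoding 0x80001008 yields 0x80001009, and decoding
// simply masks the bit back off. A pointer that was never run through
// ExpEncodePoolLink therefore fails the consistency checks below, which is
// the point of the scheme.
//
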
VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
                    PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
                    Entry->PreviousSize, (char *)&Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        if (Entry->PreviousSize)
        {
            PreviousEntry = POOL_PREV_BLOCK(Entry);
            DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
                    (char *)&PreviousEntry->PoolTag,
                    (char *)&Entry->PoolTag);
        }
        else
        {
            DPRINT1("Entry tag %.4s\n",
                    (char *)&Entry->PoolTag);
        }
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
                    Entry->BlockSize, (char *)&Entry->PoolTag,
                    NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}

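//
// Invariant sketch (illustrative): for any mid-page block B,
//     POOL_PREV_BLOCK(B)->BlockSize == B->PreviousSize, and
//     POOL_NEXT_BLOCK(B)->PreviousSize == B->BlockSize,
// with all three headers on the same page. The first block on a page has
// PreviousSize == 0 and sits exactly on the page boundary. ExpCheckPoolHeader
// above bugchecks with BAD_POOL_HEADER the moment any of these relationships
// is violated.
//
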
VOID
NTAPI
ExpCheckPoolAllocation(
    PVOID P,
    POOL_TYPE PoolType,
    ULONG Tag)
{
    PPOOL_HEADER Entry;
    ULONG i;
    KIRQL OldIrql;
    POOL_TYPE RealPoolType;

    /* Get the pool header */
    Entry = ((PPOOL_HEADER)P) - 1;

    /* Check if this is a large allocation */
    if (PAGE_ALIGN(P) == P)
    {
        /* Lock the pool table */
        KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);

        /* Find the pool tag */
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            /* Check if this is our allocation */
            if (PoolBigPageTable[i].Va == P)
            {
                /* Make sure the tag is ok */
                if (PoolBigPageTable[i].Key != Tag)
                {
                    KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
                }

                break;
            }
        }

        /* Release the lock */
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

        if (i == PoolBigPageTableSize)
        {
            /* Did not find the allocation */
            //ASSERT(FALSE);
        }

        /* Get Pool type by address */
        RealPoolType = MmDeterminePoolType(P);
    }
    else
    {
        /* Verify the tag */
        if (Entry->PoolTag != Tag)
        {
            DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
                    &Tag, &Entry->PoolTag, Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }

        /* Check the rest of the header */
        ExpCheckPoolHeader(Entry);

        /* Get Pool type from entry */
        RealPoolType = (Entry->PoolType - 1);
    }

    /* Should we check the pool type? */
    if (PoolType != -1)
    {
        /* Verify the pool type */
        if (RealPoolType != PoolType)
        {
            DPRINT1("Wrong pool type! Expected %s, got %s\n",
                    PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
                    (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
            KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }
    }
}

VOID
NTAPI
ExpCheckPoolBlocks(IN PVOID Block)
{
    BOOLEAN FoundBlock = FALSE;
    SIZE_T Size = 0;
    PPOOL_HEADER Entry;

    /* Get the first entry for this page, make sure it really is the first */
    Entry = PAGE_ALIGN(Block);
    ASSERT(Entry->PreviousSize == 0);

    /* Now scan each entry */
    while (TRUE)
    {
        /* When we actually found our block, remember this */
        if (Entry == Block) FoundBlock = TRUE;

        /* Now validate this block header */
        ExpCheckPoolHeader(Entry);

        /* And go to the next one, keeping track of our size */
        Size += Entry->BlockSize;
        Entry = POOL_NEXT_BLOCK(Entry);

        /* If we hit the last block, stop */
        if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;

        /* If we hit the end of the page, stop */
        if (PAGE_ALIGN(Entry) == Entry) break;
    }

    /* We must've found our block, and we must have hit the end of the page */
    if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
    {
        /* Otherwise, the blocks are messed up */
        KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
    }
}

FORCEINLINE
VOID
ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN PVOID Entry)
{
    //
    // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
    // be DISPATCH_LEVEL or lower for Non Paged Pool
    //
    if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
        (KeGetCurrentIrql() > APC_LEVEL) :
        (KeGetCurrentIrql() > DISPATCH_LEVEL))
    {
        //
        // Take the system down
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
                     KeGetCurrentIrql(),
                     PoolType,
                     !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
    }
}

FORCEINLINE
ULONG
ExpComputeHashForTag(IN ULONG Tag,
                     IN SIZE_T BucketMask)
{
    //
    // Compute the hash by multiplying with a large prime number and then XORing
    // with the HIDWORD of the result.
    //
    // Finally, AND with the bucket mask to generate a valid index/bucket into
    // the table
    //
    ULONGLONG Result = (ULONGLONG)40543 * Tag;
    return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
}

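//
// Worked example (illustrative): with Tag == 2 the product is
// 2 * 40543 == 0x13CBE, whose high dword is zero, so the hash reduces to
// 0x13CBE & BucketMask. For the default 2048-bucket tracker table the mask
// is 0x7FF, giving bucket 0x4BE. Distinct tags that collide this way are
// resolved by the linear probing in ExpInsertPoolTracker below.
//
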
FORCEINLINE
ULONG
ExpComputePartialHashForAddress(IN PVOID BaseAddress)
{
    ULONG Result;
    //
    // Compute the hash by converting the address into a page number, and then
    // XORing each nibble with the next one.
    //
    // We do *NOT* AND with the bucket mask at this point because big table expansion
    // might happen. Therefore, the final step of the hash must be performed
    // while holding the expansion pushlock, and this is why we call this a
    // "partial" hash only.
    //
    Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
    return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
}

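//
// Worked example (illustrative, assuming PAGE_SHIFT == 12): for a base
// address of 0x00301000 the page number is 0x301, and folding the shifted
// copies gives 0x000 ^ 0x000 ^ 0x003 ^ 0x301 == 0x302. The caller later
// ANDs this partial hash with PoolBigPageTableHash, under the table lock,
// to obtain the final bucket.
//
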
#if DBG
FORCEINLINE
BOOLEAN
ExpTagAllowPrint(CHAR Tag)
{
    if ((Tag >= 'a' && Tag <= 'z') ||
        (Tag >= 'A' && Tag <= 'Z') ||
        Tag == ' ')
    {
        return TRUE;
    }

    return FALSE;
}

#define MiDumperPrint(dbg, fmt, ...)        \
    if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
    else DPRINT1(fmt, ##__VA_ARGS__)

VOID
MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag)
{
    SIZE_T i;

    //
    // Only print the OOM header if we were called from an OOM situation,
    // i.e. not from the debugger
    //
    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
        DPRINT1("Out of memory dumper!\n");
    }
    else
    {
        KdbpPrint("Pool Used:\n");
    }

    //
    // Print table header
    //
    MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
    MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");

    //
    // We'll extract allocations for all the tracked pools
    //
    for (i = 0; i < PoolTrackTableSize; ++i)
    {
        PPOOL_TRACKER_TABLE TableEntry;

        TableEntry = &PoolTrackTable[i];

        //
        // We only care about tags which have allocated memory
        //
        if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
        {
            //
            // If there's a tag, attempt to do a pretty print,
            // but only if it matches the caller's tag, or if
            // any tag is allowed
            //
            if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE && (Tag == 0 || TableEntry->Key == Tag))
            {
                CHAR Tag[4];

                //
                // Extract each 'component' and check whether they are printable
                //
                Tag[0] = TableEntry->Key & 0xFF;
                Tag[1] = TableEntry->Key >> 8 & 0xFF;
                Tag[2] = TableEntry->Key >> 16 & 0xFF;
                Tag[3] = TableEntry->Key >> 24 & 0xFF;

                if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
                {
                    //
                    // Print in reversed order to match what is in source code
                    //
                    MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[3], Tag[2], Tag[1], Tag[0],
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedBytes);
                }
                else
                {
                    MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedBytes);
                }
            }
            else if (Tag == 0 || Tag == TAG_NONE)
            {
                MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                              TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                              TableEntry->PagedAllocs, TableEntry->PagedBytes);
            }
        }
    }

    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
    }
}
#endif

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
INIT_SECTION
ExpSeedHotTags(VOID)
{
    ULONG i, Key, Hash, Index;
    PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
    ULONG TagList[] =
    {
        '  oI',
        ' laH',
        'PldM',
        'LooP',
        'tSbO',
        ' prI',
        'bdDN',
        'LprI',
        'pOoI',
        ' ldM',
        'eliF',
        'aVMC',
        'dSeS',
        'CFtN',
        'looP',
        'rPCT',
        'bNMC',
        'dTeS',
        'sFtN',
        'TPCT',
        'CPCT',
        ' yeK',
        'qSbO',
        'mNoI',
        'aEoI',
        'cPCT',
        'aFtN',
        '0ftN',
        'tceS',
        'SprI',
        'ekoT',
        '  eS',
        'lCbO',
        'cScC',
        'lFtN',
        'cAeS',
        'mfSF',
        'kWcC',
        'miSF',
        'CdfA',
        'EdfA',
        'orSF',
        'nftN',
        'PRIU',
        'rFpN',
        'RFpN',
        'aPeS',
        'sUeS',
        'FpcA',
        'MpcA',
        'cSeS',
        'mNbO',
        'sFpN',
        'uLeS',
        'DPcS',
        'nevE',
        'vrqR',
        'ldaV',
        '  pP',
        'SdaV',
        ' daV',
        'LdaV',
        'FdaV',
        ' GIB',
    };
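    //
    // Note: multi-character literals store their bytes reversed, so the
    // entry 'looP' above is the on-disk tag "Pool", 'eliF' is "File", and
    // so on; MiDumpPoolConsumers prints the four characters reversed back
    // for the same reason.
    //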

    //
    // Loop all 64 hot tags
    //
    ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
    for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
    {
        //
        // Get the current tag, and compute its hash in the tracker table
        //
        Key = TagList[i];
        Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);

        //
        // Loop all the hashes in this index/bucket
        //
        Index = Hash;
        while (TRUE)
        {
            //
            // Find an empty entry, and make sure this isn't the last hash that
            // can fit.
            //
            // On checked builds, also make sure this is the first time we are
            // seeding this tag.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
            {
                //
                // It has been seeded, move on to the next tag
                //
                TrackTable[Hash].Key = Key;
                break;
            }

            //
            // This entry was already taken, compute the next possible hash while
            // making sure we're not back at our initial index.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            Hash = (Hash + 1) & PoolTrackTableMask;
            if (Hash == Index) break;
        }
    }
}

VOID
NTAPI
ExpRemovePoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Have we found the entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Decrement the counters depending on whether this was paged or
            // nonpaged pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedFrees);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
                                        -(SSIZE_T)NumberOfBytes);
            return;
        }

        //
        // We should have only ended up with an empty entry if we've reached
        // the last bucket
        //
        if (!TableEntry->Key)
        {
            DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
                    Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
            ASSERT(Hash == TableMask);
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}

VOID
NTAPI
ExpInsertPoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    KIRQL OldIrql;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // There is also an internal flag you can set to break on malformed tags
    //
    if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);

    //
    // ASSERT on ReactOS features not yet supported
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));
    ASSERT(KeGetCurrentProcessorNumber() == 0);

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Do we already have an entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Increment the counters depending on whether this was paged or
            // nonpaged pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedAllocs);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
            return;
        }

        //
        // We don't have an entry yet, but we've found a free bucket for it
        //
        if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
        {
            //
            // We need to hold the lock while creating a new entry, since other
            // processors might be in this code path as well
            //
            ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
            if (!PoolTrackTable[Hash].Key)
            {
                //
                // We've won the race, so now create this entry in the bucket
                //
                ASSERT(Table[Hash].Key == 0);
                PoolTrackTable[Hash].Key = Key;
                TableEntry->Key = Key;
            }
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            //
            // Now we force the loop to run again, and we should now end up in
            // the code path above which does the interlocked increments...
            //
            continue;
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}

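//
// Probing sketch (illustrative, hypothetical tags): if 'AbcD' and 'EfgH'
// both hash to bucket 5, the first tag claims Table[5] and the second walks
// forward, wrapping through the mask, until it finds Table[6] free. Lookups
// replay the same walk, which is why hitting an empty slot during
// ExpRemovePoolTracker is only legitimate at the table's final bucket.
//
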
VOID
NTAPI
INIT_SECTION
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }

    //
    // Note that ReactOS does not support Session Pool Yet
    //
    ASSERT(PoolType != PagedPoolSession);
}

VOID
NTAPI
INIT_SECTION
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;
    SIZE_T TableSize;
    ULONG i;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Compute the track table size and convert it from a power of two to an
        // actual byte size
        //
        // NOTE: On checked builds, we'll assert if the registry table size was
        // invalid, while on retail builds we'll just break out of the loop at
        // that point.
        //
        TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // If we hit bit 32, then no size was defined in the registry, so
        // we'll use the default size of 2048 entries.
        //
        // Otherwise, use the size from the registry, as long as it's not
        // smaller than 64 entries.
        //
        if (i == 32)
        {
            PoolTrackTableSize = 2048;
        }
        else
        {
            PoolTrackTableSize = max(1 << i, 64);
        }

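        //
        // Worked example (illustrative): a registry-provided size of 0x200
        // leaves the loop at i == 9 with a clean power of two, so the table
        // gets max(1 << 9, 64) == 512 entries; if no size was set at all,
        // every iteration runs dry, i reaches 32, and the 2048-entry default
        // is used instead.
        //
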
        //
        // Loop trying with the biggest specified size first, and cut it down
        // by a power of two each iteration in case not enough memory exists
        //
        while (TRUE)
        {
            //
            // Do not allow overflow
            //
            if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
            {
                PoolTrackTableSize >>= 1;
                continue;
            }

            //
            // Allocate the tracker table and exit the loop if this worked
            //
            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 (PoolTrackTableSize + 1) *
                                                 sizeof(POOL_TRACKER_TABLE));
            if (PoolTrackTable) break;

            //
            // Otherwise, as long as we're not down to the last bit, keep
            // iterating
            //
            if (PoolTrackTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }
            PoolTrackTableSize >>= 1;
        }

        //
        // Add one entry, compute the hash, and zero the table
        //
        PoolTrackTableSize++;
        PoolTrackTableMask = PoolTrackTableSize - 2;

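        //
        // Sizing sketch (illustrative): with the 2048-entry default, the
        // increment above makes PoolTrackTableSize 2049 and the mask 2047
        // (0x7FF), so hashing and probing stay within buckets 0-2047. The
        // extra final slot is explicitly refused by the insertion code
        // (the Hash != PoolTrackTableSize - 1 checks), which appears to
        // reserve it as an overflow entry.
        //
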
        RtlZeroMemory(PoolTrackTable,
                      PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // Finally, add the most used tags to speed up those allocations
        //
        ExpSeedHotTags();

        //
        // We now do the exact same thing with the tracker table for big pages
        //
        TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // For big pages, the default tracker table is 4096 entries, while the
        // minimum is still 64
        //
        if (i == 32)
        {
            PoolBigPageTableSize = 4096;
        }
        else
        {
            PoolBigPageTableSize = max(1 << i, 64);
        }

        //
        // Again, run the exact same loop we ran earlier, but this time for the
        // big pool tracker instead
        //
        while (TRUE)
        {
            if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
            {
                PoolBigPageTableSize >>= 1;
                continue;
            }

            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                   sizeof(POOL_TRACKER_BIG_PAGES));
            if (PoolBigPageTable) break;

            if (PoolBigPageTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }

            PoolBigPageTableSize >>= 1;
        }

        //
        // An extra entry is not needed for the big pool tracker, so just
        // compute the hash and zero it
        //
        PoolBigPageTableHash = PoolBigPageTableSize - 1;
        RtlZeroMemory(PoolBigPageTable,
                      PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;

        //
        // During development, print this out so we can see what's happening
        //
        DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
        DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));

        //
        // Insert the generic tracker for all of big pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolBigPageTableSize *
                                            sizeof(POOL_TRACKER_BIG_PAGES)),
                             NonPagedPool);

        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Initialize the tag spinlock
        //
        KeInitializeSpinLock(&ExpTaggedPoolLock);

        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        ExpPagedPoolDescriptor[0] = Descriptor;
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);

        //
        // Insert the generic tracker for all of nonpaged pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                             NonPagedPool);
    }
}

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}

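//
// Usage sketch (illustrative): callers bracket free-list surgery as
//
//     OldIrql = ExLockPool(PoolDesc);
//     ... manipulate PoolDesc->ListHeads ...
//     ExUnlockPool(PoolDesc, OldIrql);
//
// which raises to DISPATCH_LEVEL via the queued spinlock for nonpaged pool,
// but only to APC_LEVEL via the guarded mutex for paged pool, matching the
// IRQL rules that ExpCheckPoolIrqlLevel enforces.
//
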
VOID
NTAPI
ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
                        IN PVOID DeferredContext,
                        IN PVOID SystemArgument1,
                        IN PVOID SystemArgument2)
{
    PPOOL_DPC_CONTEXT Context = DeferredContext;
    UNREFERENCED_PARAMETER(Dpc);
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    //
    // Make sure we win the race, and if we did, copy the data atomically
    //
    if (KeSignalCallDpcSynchronize(SystemArgument2))
    {
        RtlCopyMemory(Context->PoolTrackTable,
                      PoolTrackTable,
                      Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // This is here because ReactOS does not yet support expansion
        //
        ASSERT(Context->PoolTrackTableSizeExpansion == 0);
    }

    //
    // Regardless of whether we won or not, we must now synchronize and then
    // decrement the barrier since this is one more processor that has completed
    // the callback.
    //
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}

NTSTATUS
NTAPI
ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
                 IN ULONG SystemInformationLength,
                 IN OUT PULONG ReturnLength OPTIONAL)
{
    ULONG TableSize, CurrentLength;
    ULONG EntryCount;
    NTSTATUS Status = STATUS_SUCCESS;
    PSYSTEM_POOLTAG TagEntry;
    PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
    POOL_DPC_CONTEXT Context;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Keep track of how much data the caller's buffer must hold
    //
    CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);

    //
    // Initialize the caller's buffer
    //
    TagEntry = &SystemInformation->TagInfo[0];
    SystemInformation->Count = 0;

    //
    // Capture the number of entries, and the total size needed to make a copy
    // of the table
    //
    EntryCount = (ULONG)PoolTrackTableSize;
    TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);

    //
    // Allocate the "Generic DPC" temporary buffer
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    //
    // Do a "Generic DPC" to atomically retrieve the tag and allocation data
    //
    Context.PoolTrackTable = Buffer;
    Context.PoolTrackTableSize = PoolTrackTableSize;
    Context.PoolTrackTableExpansion = NULL;
    Context.PoolTrackTableSizeExpansion = 0;
    KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);

    //
    // Now parse the results
    //
    for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
    {
        //
        // If the entry is empty, skip it
        //
        if (!TrackerEntry->Key) continue;

        //
        // Otherwise, add one more entry to the caller's buffer, and ensure that
        // enough space has been allocated in it
        //
        SystemInformation->Count++;
        CurrentLength += sizeof(*TagEntry);
        if (SystemInformationLength < CurrentLength)
        {
            //
            // The caller's buffer is too small, so set a failure code. The
            // caller will know the count, as well as how much space is needed.
            //
            // We do NOT break out of the loop, because we want to keep incrementing
            // the Count as well as CurrentLength so that the caller can know the
            // final numbers
            //
            Status = STATUS_INFO_LENGTH_MISMATCH;
        }
        else
        {
            //
            // Small sanity check that our accounting is working correctly
            //
            ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
            ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);

            //
            // Return the data into the caller's buffer
            //
            TagEntry->TagUlong = TrackerEntry->Key;
            TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
            TagEntry->PagedFrees = TrackerEntry->PagedFrees;
            TagEntry->PagedUsed = TrackerEntry->PagedBytes;
            TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
            TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
            TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
            TagEntry++;
        }
    }

    //
    // Free the "Generic DPC" temporary buffer, return the buffer length and status
    //
    ExFreePoolWithTag(Buffer, 'ofnI');
    if (ReturnLength) *ReturnLength = CurrentLength;
    return Status;
}

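//
// Usage sketch (illustrative, user-mode): this data surfaces through
// NtQuerySystemInformation(SystemPoolTagInformation, ...). Since a short
// buffer yields STATUS_INFO_LENGTH_MISMATCH with ReturnLength set to the
// size actually needed, callers typically probe first and retry:
//
//     ULONG Len = 0;
//     NTSTATUS Status = NtQuerySystemInformation(SystemPoolTagInformation,
//                                                NULL, 0, &Len);
//     // ...allocate Len bytes, then query again with the real buffer...
//
// Tools such as poolmon follow this retry pattern.
//
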
BOOLEAN
NTAPI
ExpAddTagForBigPages(IN PVOID Va,
                     IN ULONG Key,
                     IN ULONG NumberOfPages,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, i = 0;
    PVOID OldVa;
    KIRQL OldIrql;
    SIZE_T TableSize;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // rollover to hash bucket 0 and keep going from there. If we return back
    // to the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry and attempt to atomically make the
        // entry busy now
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 25% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
            // Note that we only do this if it's also been the 16th time that we
            // keep losing the race or that we are not finding a free entry anymore,
            // which implies a massive number of concurrent big pool allocations.
            //
            InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
            if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
            {
                DPRINT("Should attempt expansion since we now have %lu entries\n",
                       ExpPoolBigEntriesInUse);
            }

            //
            // We have our entry, return
            //
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }

        //
        // We don't have our entry yet, so keep trying, making the entry list
        // circular if we reach the last entry. We'll eventually break out of
        // the loop once we've rolled over and returned back to our original
        // hash bucket
        //
        i++;
        if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
    } while (Entry != EntryStart);

    //
    // This means there's no free hash buckets whatsoever, so we would now have
    // to attempt expanding the table
    //
    DPRINT1("Big pool expansion needed, not implemented!\n");
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return FALSE;
}

ULONG
NTAPI
ExpFindAndRemoveTagBigPages(IN PVOID Va,
                            OUT PULONG_PTR BigPages,
                            IN POOL_TYPE PoolType)
{
    BOOLEAN FirstTry = TRUE;
    SIZE_T TableSize;
    KIRQL OldIrql;
    ULONG PoolTag, Hash;
    PPOOL_TRACKER_BIG_PAGES Entry;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // Loop while trying to find this big page allocation
    //
    while (PoolBigPageTable[Hash].Va != Va)
    {
        //
        // Increment the size until we go past the end of the table
        //
        if (++Hash >= TableSize)
        {
            //
            // Is this the second time we've tried?
            //
            if (!FirstTry)
            {
                //
                // This means it was never inserted into the pool table and it
                // received the special "BIG" tag -- return that and return 0
                // so that the code can ask Mm for the page count instead
                //
                KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
                *BigPages = 0;
                return ' GIB';
            }

            //
            // The first time this happens, reset the hash index and try again
            //
            Hash = 0;
            FirstTry = FALSE;
        }
    }

    //
    // Now capture all the information we need from the entry, since after we
    // release the lock, the data can change
    //
    Entry = &PoolBigPageTable[Hash];
    *BigPages = Entry->NumberOfPages;
    PoolTag = Entry->Key;

    //
    // Set the free bit, and decrement the number of allocations. Finally, release
    // the lock and return the tag that was located
    //
    InterlockedIncrement((PLONG)&Entry->Va);
    InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return PoolTag;
}

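//
// Free-bit sketch (illustrative): big-pool Vas are page aligned, so bit 0 of
// the Va field doubles as POOL_BIG_TABLE_ENTRY_FREE. The interlocked
// increment above turns an in-use Va such as 0xFFB40000 into 0xFFB40001,
// marking the slot free in one atomic step while a stale reader can still
// see which page the entry used to describe. This is also why the table is
// initialized with every Va set to (PVOID)1 in InitializePool.
//
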
VOID
NTAPI
ExQueryPoolUsage(OUT PULONG PagedPoolPages,
                 OUT PULONG NonPagedPoolPages,
                 OUT PULONG PagedPoolAllocs,
                 OUT PULONG PagedPoolFrees,
                 OUT PULONG PagedPoolLookasideHits,
                 OUT PULONG NonPagedPoolAllocs,
                 OUT PULONG NonPagedPoolFrees,
                 OUT PULONG NonPagedPoolLookasideHits)
{
    ULONG i;
    PPOOL_DESCRIPTOR PoolDesc;

    //
    // Assume all failures
    //
    *PagedPoolPages = 0;
    *PagedPoolAllocs = 0;
    *PagedPoolFrees = 0;

    //
    // Tally up the totals for all the paged pools
    //
    for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
    {
        PoolDesc = ExpPagedPoolDescriptor[i];
        *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
        *PagedPoolAllocs += PoolDesc->RunningAllocs;
        *PagedPoolFrees += PoolDesc->RunningDeAllocs;
    }

    //
    // The first non-paged pool has a hardcoded well-known descriptor name
    //
    PoolDesc = &NonPagedPoolDescriptor;
    *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
    *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
    *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;

    //
    // If the system has more than one non-paged pool, copy the other descriptor
    // totals as well
    //
#if 0
    if (ExpNumberOfNonPagedPools > 1)
    {
        for (i = 0; i < ExpNumberOfNonPagedPools; i++)
        {
            PoolDesc = ExpNonPagedPoolDescriptor[i];
            *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
            *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
            *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
        }
    }
#endif

    //
    // FIXME: Not yet supported
    //
    *NonPagedPoolLookasideHits += 0;
    *PagedPoolLookasideHits += 0;
}

VOID
NTAPI
ExReturnPoolQuota(IN PVOID P)
{
    PPOOL_HEADER Entry;
    POOL_TYPE PoolType;
    USHORT BlockSize;
    PEPROCESS Process;

    if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
        (MmIsSpecialPoolAddress(P)))
    {
        return;
    }

    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    PoolType = Entry->PoolType - 1;
    BlockSize = Entry->BlockSize;

    if (PoolType & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        ASSERT(Process != NULL);
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             0x0D,
                             (ULONG_PTR)P,
                             Entry->PoolTag,
                             (ULONG_PTR)Process);
            }
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            PsReturnPoolQuota(Process,
                              PoolType & BASE_POOL_TYPE_MASK,
                              BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;
    ULONG OriginalType;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);
    ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);

    //
    // Not supported in ReactOS
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Check if verifier or special pool is enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
    {
        //
        // For verifier, we should call the verification routine
        //
        if (ExpPoolFlags & POOL_FLAG_VERIFIER)
        {
            DPRINT1("Driver Verifier is not yet supported\n");
        }

        //
        // For special pool, we check if this is a suitable allocation and do
        // the special allocation if needed
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if this is a special pool allocation
            //
            if (MmUseSpecialPool(NumberOfBytes, Tag))
            {
                //
                // Try to allocate using special pool
                //
                Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
                if (Entry) return Entry;
            }
        }
    }

    //
    // Get the pool type and its corresponding vector for this request
    //
    OriginalType = PoolType;
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Allocate pages for it
        //
        Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
        if (!Entry)
        {
#if DBG
            //
            // Out of memory, display current consumption
            //
            MiDumpPoolConsumers(FALSE, 0);
#endif

            //
            // Must succeed pool is deprecated, but still supported. These allocation
            // failures must cause an immediate bugcheck
            //
            if (OriginalType & MUST_SUCCEED_POOL_MASK)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             NumberOfBytes,
                             NonPagedPoolDescriptor.TotalPages,
                             NonPagedPoolDescriptor.TotalBigPages,
                             0);
            }

            //
            // Internal debugging
            //
            ExPoolFailures++;

            //
            // This flag requests printing failures, and can also further specify
            // breaking on failures
            //
            if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
            {
                DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                        NumberOfBytes,
                        OriginalType);
                if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
            }

            //
            // Finally, this flag requests an exception, which we are more than
            // happy to raise!
            //
            if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
            {
                ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
            }

            return NULL;
        }

        //
        // Increment required counters
        //
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               (LONG)BYTES_TO_PAGES(NumberOfBytes));
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
        InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

        //
        // Add a tag for the big page allocation and switch to the generic "BIG"
        // tag if we failed to do so, then insert a tracker for this allocation.
        //
        if (!ExpAddTagForBigPages(Entry,
                                  Tag,
                                  (ULONG)BYTES_TO_PAGES(NumberOfBytes),
                                  OriginalType))
        {
            Tag = ' GIB';
        }
        ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
        return Entry;
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);
    ASSERT(i < POOL_LISTS_PER_PAGE);

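    //
    // Worked example (illustrative, assuming an 8-byte POOL_HEADER and
    // 8-byte blocks): a 100-byte request needs 108 bytes with its header,
    // and (100 + 8 + 7) / 8 rounds that up to i == 14 blocks, i.e. 112
    // bytes carved out of the page.
    //
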
1842 //
1843 // Handle lookaside list optimization for both paged and nonpaged pool
1844 //
1845 if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
1846 {
1847 //
1848 // Try popping it from the per-CPU lookaside list
1849 //
1850 LookasideList = (PoolType == PagedPool) ?
1851 Prcb->PPPagedLookasideList[i - 1].P :
1852 Prcb->PPNPagedLookasideList[i - 1].P;
1853 LookasideList->TotalAllocates++;
1854 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1855 if (!Entry)
1856 {
1857 //
1858 // We failed, try popping it from the global list
1859 //
1860 LookasideList = (PoolType == PagedPool) ?
1861 Prcb->PPPagedLookasideList[i - 1].L :
1862 Prcb->PPNPagedLookasideList[i - 1].L;
1863 LookasideList->TotalAllocates++;
1864 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1865 }
1866
1867 //
1868 // If we were able to pop it, update the accounting and return the block
1869 //
1870 if (Entry)
1871 {
1872 LookasideList->AllocateHits++;
1873
1874 //
1875 // Get the real entry, write down its pool type, and track it
1876 //
1877 Entry--;
1878 Entry->PoolType = OriginalType + 1;
1879 ExpInsertPoolTracker(Tag,
1880 Entry->BlockSize * POOL_BLOCK_SIZE,
1881 OriginalType);
1882
1883 //
1884 // Return the pool allocation
1885 //
1886 Entry->PoolTag = Tag;
1887 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1888 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1889 return POOL_FREE_BLOCK(Entry);
1890 }
1891 }
1892
1893 //
1894 // Loop in the free lists looking for a block if this size. Start with the
1895 // list optimized for this kind of size lookup
1896 //
1897 ListHead = &PoolDesc->ListHeads[i];
1898 do
1899 {
1900 //
1901 // Are there any free entries available on this list?
1902 //
1903 if (!ExpIsPoolListEmpty(ListHead))
1904 {
1905 //
1906 // Acquire the pool lock now
1907 //
1908 OldIrql = ExLockPool(PoolDesc);
1909
1910 //
1911 // And make sure the list still has entries
1912 //
1913 if (ExpIsPoolListEmpty(ListHead))
1914 {
1915 //
1916 // Someone raced us (and won) before we had a chance to acquire
1917 // the lock.
1918 //
1919 // Try again!
1920 //
1921 ExUnlockPool(PoolDesc, OldIrql);
1922 continue;
1923 }
1924
1925 //
1926 // Remove a free entry from the list
1927 // Note that due to the way we insert free blocks into multiple lists
1928 // there is a guarantee that any block on this list will either be
1929 // of the correct size, or perhaps larger.
1930 //
1931 ExpCheckPoolLinks(ListHead);
1932 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1933 ExpCheckPoolLinks(ListHead);
1934 ExpCheckPoolBlocks(Entry);
1935 ASSERT(Entry->BlockSize >= i);
1936 ASSERT(Entry->PoolType == 0);
1937
1938 //
1939 // Check if this block is larger that what we need. The block could
1940 // not possibly be smaller, due to the reason explained above (and
1941 // we would've asserted on a checked build if this was the case).
1942 //
1943 if (Entry->BlockSize != i)
1944 {
1945 //
1946 // Is there an entry before this one?
1947 //
1948 if (Entry->PreviousSize == 0)
1949 {
1950 //
1951 // There isn't anyone before us, so take the next block and
1952 // turn it into a fragment that contains the leftover data
1953 // that we don't need to satisfy the caller's request
1954 //
1955 FragmentEntry = POOL_BLOCK(Entry, i);
1956 FragmentEntry->BlockSize = Entry->BlockSize - i;
1957
1958 //
1959 // And make it point back to us
1960 //
1961 FragmentEntry->PreviousSize = i;
1962
1963 //
1964 // Now get the block that follows the new fragment and check
1965 // if it's still on the same page as us (and not at the end)
1966 //
1967 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
1968 if (PAGE_ALIGN(NextEntry) != NextEntry)
1969 {
1970 //
1971 // Adjust this next block to point to our newly created
1972 // fragment block
1973 //
1974 NextEntry->PreviousSize = FragmentEntry->BlockSize;
1975 }
1976 }
1977 else
1978 {
1979 //
1980 // There is a free entry before us, which we know is smaller
1981 // so we'll make this entry the fragment instead
1982 //
1983 FragmentEntry = Entry;
1984
1985 //
1986 // And then we'll remove from it the actual size required.
1987 // Now the entry is a leftover free fragment
1988 //
1989 Entry->BlockSize -= i;
1990
1991 //
1992 // Now let's go to the next entry after the fragment (which
1993 // used to point to our original free entry) and make it
1994 // reference the new fragment entry instead.
1995 //
1996 // This is the entry that will actually end up holding the
1997 // allocation!
1998 //
1999 Entry = POOL_NEXT_BLOCK(Entry);
2000 Entry->PreviousSize = FragmentEntry->BlockSize;
2001
2002 //
2003 // And now let's go to the entry after that one and check if
2004 // it's still on the same page, and not at the end
2005 //
2006 NextEntry = POOL_BLOCK(Entry, i);
2007 if (PAGE_ALIGN(NextEntry) != NextEntry)
2008 {
2009 //
2010 // Make it reference the allocation entry
2011 //
2012 NextEntry->PreviousSize = i;
2013 }
2014 }
2015
2016 //
2017 // Now our (allocation) entry is the right size
2018 //
2019 Entry->BlockSize = i;
2020
2021 //
2022 // And the next entry is now the free fragment which contains
2023 // the remaining difference between how big the original entry
2024 // was, and the actual size the caller needs/requested.
2025 //
2026 FragmentEntry->PoolType = 0;
2027 BlockSize = FragmentEntry->BlockSize;
2028
2029 //
2030 // Now check if enough free bytes remained for us to have a
2031 // "full" entry, which contains enough bytes for a linked list
2032 // and thus can be used for allocations (up to 8 bytes...)
2033 //
2034 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2035 if (BlockSize != 1)
2036 {
2037 //
2038 // Insert the free entry into the free list for this size
2039 //
2040 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2041 POOL_FREE_BLOCK(FragmentEntry));
2042 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2043 }
2044 }
2045
2046 //
2047 // We have found an entry for this allocation, so set the pool type
2048 // and release the lock since we're done
2049 //
2050 Entry->PoolType = OriginalType + 1;
2051 ExpCheckPoolBlocks(Entry);
2052 ExUnlockPool(PoolDesc, OldIrql);
2053
2054 //
2055 // Increment required counters
2056 //
2057 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2058 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2059
2060 //
2061 // Track this allocation
2062 //
2063 ExpInsertPoolTracker(Tag,
2064 Entry->BlockSize * POOL_BLOCK_SIZE,
2065 OriginalType);
2066
2067 //
2068 // Return the pool allocation
2069 //
2070 Entry->PoolTag = Tag;
2071 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2072 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2073 return POOL_FREE_BLOCK(Entry);
2074 }
2075 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2076
2077 //
2078 // There were no free entries left, so we have to allocate a new fresh page
2079 //
2080 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2081 if (!Entry)
2082 {
2083 #if DBG
2084 //
2085 // Out of memory, display current consumption
2086 //
2087 MiDumpPoolConsumers(FALSE, 0);
2088 #endif
2089
2090 //
2091 // Must-succeed pool is deprecated, but still supported. These allocation
2092 // failures must cause an immediate bugcheck
2093 //
2094 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2095 {
2096 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2097 PAGE_SIZE,
2098 NonPagedPoolDescriptor.TotalPages,
2099 NonPagedPoolDescriptor.TotalBigPages,
2100 0);
2101 }
2102
2103 //
2104 // Internal debugging
2105 //
2106 ExPoolFailures++;
2107
2108 //
2109 // This flag requests printing failures, and can also further specify
2110 // breaking on failures
2111 //
2112 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2113 {
2114 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2115 NumberOfBytes,
2116 OriginalType);
2117 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2118 }
2119
2120 //
2121 // Finally, this flag requests an exception, which we are more than
2122 // happy to raise!
2123 //
2124 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2125 {
2126 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2127 }
2128
2129 //
2130 // Return NULL to the caller in all other cases
2131 //
2132 return NULL;
2133 }
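
//
// Caller-side sketch (the 'Xmpl' tag and the 0x100 size are assumptions,
// not part of this file): a caller that passes
// POOL_RAISE_IF_ALLOCATION_FAILURE must be prepared for the status raised
// above, e.g. with SEH:
//
//   _SEH2_TRY
//   {
//       Buffer = ExAllocatePoolWithTag(NonPagedPool |
//                                      POOL_RAISE_IF_ALLOCATION_FAILURE,
//                                      0x100,
//                                      'Xmpl');
//   }
//   _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
//   {
//       Buffer = NULL;
//   }
//   _SEH2_END;
//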
2134
2135 //
2136 // Setup the entry data
2137 //
2138 Entry->Ulong1 = 0;
2139 Entry->BlockSize = i;
2140 Entry->PoolType = OriginalType + 1;
2141
2142 //
2143 // This page will have two entries -- one for the allocation (which we just
2144 // created above), and one for the remaining free bytes, which we're about
2145 // to create now. The free bytes are the whole page minus what was allocated
2146 // and then converted into units of block headers.
2147 //
2148 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2149 FragmentEntry = POOL_BLOCK(Entry, i);
2150 FragmentEntry->Ulong1 = 0;
2151 FragmentEntry->BlockSize = BlockSize;
2152 FragmentEntry->PreviousSize = i;
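
//
// Worked example (assuming PAGE_SIZE = 4096 and POOL_BLOCK_SIZE = 8, i.e.
// 512 blocks per page): a 100-byte request needs 8 + 100 = 108 bytes,
// rounded up to i = 14 blocks, so the fresh page is laid out as
//
//   [Alloc: 14 blocks][Fragment: 498 blocks]
//
// with the fragment's PreviousSize = 14 linking the two entries together.
//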
2153
2154 //
2155 // Increment required counters
2156 //
2157 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2158 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2159
2160 //
2161 // Now check if enough free bytes remained for us to have a "full" entry,
2162 // which contains enough bytes for a linked list and thus can be used for
2163 // allocations (up to 8 bytes...)
2164 //
2165 if (FragmentEntry->BlockSize != 1)
2166 {
2167 //
2168 // Excellent -- acquire the pool lock
2169 //
2170 OldIrql = ExLockPool(PoolDesc);
2171
2172 //
2173 // And insert the free entry into the free list for this block size
2174 //
2175 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2176 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2177 POOL_FREE_BLOCK(FragmentEntry));
2178 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2179
2180 //
2181 // Release the pool lock
2182 //
2183 ExpCheckPoolBlocks(Entry);
2184 ExUnlockPool(PoolDesc, OldIrql);
2185 }
2186 else
2187 {
2188 //
2189 // Simply do a sanity check
2190 //
2191 ExpCheckPoolBlocks(Entry);
2192 }
2193
2194 //
2195 // Increment performance counters and track this allocation
2196 //
2197 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2198 ExpInsertPoolTracker(Tag,
2199 Entry->BlockSize * POOL_BLOCK_SIZE,
2200 OriginalType);
2201
2202 //
2203 // And return the pool allocation
2204 //
2205 ExpCheckPoolBlocks(Entry);
2206 Entry->PoolTag = Tag;
2207 return POOL_FREE_BLOCK(Entry);
2208 }
2209
2210 /*
2211 * @implemented
2212 */
2213 PVOID
2214 NTAPI
2215 ExAllocatePool(POOL_TYPE PoolType,
2216 SIZE_T NumberOfBytes)
2217 {
2218 ULONG Tag = TAG_NONE;
2219 #if 0 && DBG
2220 PLDR_DATA_TABLE_ENTRY LdrEntry;
2221
2222 /* Use the first four letters of the driver name, or "None" if unavailable */
2223 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2224 ? MiLookupDataTableEntry(_ReturnAddress())
2225 : NULL;
2226 if (LdrEntry)
2227 {
2228 ULONG i;
2229 Tag = 0;
2230 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2231 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2232 for (; i < 4; i++)
2233 Tag = Tag >> 8 | ' ' << 24;
2234 }
2235 #endif
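//
// For reference, the disabled loop above packs the first four characters
// of the driver name into the tag least-significant byte first, so a
// hypothetical driver named "xmpl.sys" would produce Tag == 'lpmx',
// which tools display as "xmpl" since tags are stored little-endian.
//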
2236 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2237 }
2238
2239 /*
2240 * @implemented
2241 */
2242 VOID
2243 NTAPI
2244 ExFreePoolWithTag(IN PVOID P,
2245 IN ULONG TagToFree)
2246 {
2247 PPOOL_HEADER Entry, NextEntry;
2248 USHORT BlockSize;
2249 KIRQL OldIrql;
2250 POOL_TYPE PoolType;
2251 PPOOL_DESCRIPTOR PoolDesc;
2252 ULONG Tag;
2253 BOOLEAN Combined = FALSE;
2254 PFN_NUMBER PageCount, RealPageCount;
2255 PKPRCB Prcb = KeGetCurrentPrcb();
2256 PGENERAL_LOOKASIDE LookasideList;
2257 PEPROCESS Process;
2258
2259 //
2260 // Check if any of the debug flags are enabled
2261 //
2262 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2263 POOL_FLAG_CHECK_WORKERS |
2264 POOL_FLAG_CHECK_RESOURCES |
2265 POOL_FLAG_VERIFIER |
2266 POOL_FLAG_CHECK_DEADLOCK |
2267 POOL_FLAG_SPECIAL_POOL))
2268 {
2269 //
2270 // Check if special pool is enabled
2271 //
2272 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2273 {
2274 //
2275 // Check if it was allocated from a special pool
2276 //
2277 if (MmIsSpecialPoolAddress(P))
2278 {
2279 //
2280 // Was deadlock verification also enabled? We can do some extra
2281 // checks at this point
2282 //
2283 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2284 {
2285 DPRINT1("Verifier not yet supported\n");
2286 }
2287
2288 //
2289 // It is, so handle it via special pool free routine
2290 //
2291 MmFreeSpecialPool(P);
2292 return;
2293 }
2294 }
2295
2296 //
2297 // For non-big page allocations, we'll do a bunch of checks in here
2298 //
2299 if (PAGE_ALIGN(P) != P)
2300 {
2301 //
2302 // Get the entry for this pool allocation
2303 // The pointer math here may look wrong or confusing, but it is quite right
2304 //
2305 Entry = P;
2306 Entry--;
2307
2308 //
2309 // Get the pool type
2310 //
2311 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2312
2313 //
2314 // FIXME: Many other debugging checks go here
2315 //
2316 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2317 }
2318 }
2319
2320 //
2321 // Check if this is a big page allocation
2322 //
2323 if (PAGE_ALIGN(P) == P)
2324 {
2325 //
2326 // We need to find the tag for it, so first we need to find out what
2327 // kind of allocation this was (paged or nonpaged), then we can go
2328 // ahead and try finding the tag for it. Remember to get rid of the
2329 // PROTECTED_POOL tag if it's found.
2330 //
2331 // Note that if at insertion time, we failed to add the tag for a big
2332 // pool allocation, we used a special tag called 'BIG' to identify the
2333 // allocation, and we may get this tag back. In this scenario, we must
2334 // manually get the size of the allocation by actually counting through
2335 // the PFN database.
2336 //
2337 PoolType = MmDeterminePoolType(P);
2338 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2339 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2340 if (!Tag)
2341 {
2342 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2343 ASSERT(Tag == ' GIB');
2344 PageCount = 1; // We are going to lie! This might screw up accounting?
2345 }
2346 else if (Tag & PROTECTED_POOL)
2347 {
2348 Tag &= ~PROTECTED_POOL;
2349 }
2350
2351 //
2352 // Check block tag
2353 //
2354 if (TagToFree && TagToFree != Tag)
2355 {
2356 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2357 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2358 }
2359
2360 //
2361 // We have our tag and our page count, so we can go ahead and remove this
2362 // tracker now
2363 //
2364 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2365
2366 //
2367 // Check if any of the debug flags are enabled
2368 //
2369 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2370 POOL_FLAG_CHECK_WORKERS |
2371 POOL_FLAG_CHECK_RESOURCES |
2372 POOL_FLAG_CHECK_DEADLOCK))
2373 {
2374 //
2375 // Was deadlock verification also enabled? We can do some extra
2376 // checks at this point
2377 //
2378 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2379 {
2380 DPRINT1("Verifier not yet supported\n");
2381 }
2382
2383 //
2384 // FIXME: Many debugging checks go here
2385 //
2386 }
2387
2388 //
2389 // Update counters
2390 //
2391 PoolDesc = PoolVector[PoolType];
2392 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2393 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2394 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2395
2396 //
2397 // Do the real free now and update the last counter with the big page count
2398 //
2399 RealPageCount = MiFreePoolPages(P);
2400 ASSERT(RealPageCount == PageCount);
2401 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2402 -(LONG)RealPageCount);
2403 return;
2404 }
2405
2406 //
2407 // Get the entry for this pool allocation
2408 // The pointer math here may look wrong or confusing, but it is quite right
2409 //
2410 Entry = P;
2411 Entry--;
2412 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
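
//
// Layout reminder (x86 sizes assumed, sizeof(POOL_HEADER) == 8):
//
//   Entry           P
//   v               v
//   [POOL_HEADER(8)][caller data...]
//
// Stepping one PPOOL_HEADER back from the caller's pointer therefore
// lands exactly on the header describing this allocation.
//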
2413
2414 //
2415 // Get the size of the entry and its pool type, then load the descriptor
2416 // for this pool type
2417 //
2418 BlockSize = Entry->BlockSize;
2419 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2420 PoolDesc = PoolVector[PoolType];
2421
2422 //
2423 // Make sure that the IRQL makes sense
2424 //
2425 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2426
2427 //
2428 // Get the pool tag and get rid of the PROTECTED_POOL flag
2429 //
2430 Tag = Entry->PoolTag;
2431 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2432
2433 //
2434 // Check block tag
2435 //
2436 if (TagToFree && TagToFree != Tag)
2437 {
2438 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2439 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2440 }
2441
2442 //
2443 // Track the removal of this allocation
2444 //
2445 ExpRemovePoolTracker(Tag,
2446 BlockSize * POOL_BLOCK_SIZE,
2447 Entry->PoolType - 1);
2448
2449 //
2450 // Release pool quota, if any
2451 //
2452 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2453 {
2454 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2455 if (Process)
2456 {
2457 if (Process->Pcb.Header.Type != ProcessObject)
2458 {
2459 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2460 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2461 KeBugCheckEx(BAD_POOL_CALLER,
2462 0x0D,
2463 (ULONG_PTR)P,
2464 Tag,
2465 (ULONG_PTR)Process);
2466 }
2467 PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2468 ObDereferenceObject(Process);
2469 }
2470 }
2471
2472 //
2473 // Is this allocation small enough to have come from a lookaside list?
2474 //
2475 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2476 {
2477 //
2478 // Try pushing it into the per-CPU lookaside list
2479 //
2480 LookasideList = (PoolType == PagedPool) ?
2481 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2482 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2483 LookasideList->TotalFrees++;
2484 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2485 {
2486 LookasideList->FreeHits++;
2487 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2488 return;
2489 }
2490
2491 //
2492 // We failed, try to push it into the global lookaside list
2493 //
2494 LookasideList = (PoolType == PagedPool) ?
2495 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2496 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2497 LookasideList->TotalFrees++;
2498 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2499 {
2500 LookasideList->FreeHits++;
2501 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2502 return;
2503 }
2504 }
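
//
// The allocation path earlier in this file performs the symmetric
// operation, popping from these same S-Lists before touching the
// descriptor's free lists. A minimal sketch of that pop side (field
// names as used above):
//
//   P = InterlockedPopEntrySList(&LookasideList->ListHead);
//   if (P) LookasideList->AllocateHits++;
//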
2505
2506 //
2507 // Get the pointer to the next entry
2508 //
2509 NextEntry = POOL_BLOCK(Entry, BlockSize);
2510
2511 //
2512 // Update performance counters
2513 //
2514 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2515 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -(LONG_PTR)(BlockSize * POOL_BLOCK_SIZE));
2516
2517 //
2518 // Acquire the pool lock
2519 //
2520 OldIrql = ExLockPool(PoolDesc);
2521
2522 //
2523 // Check if the next allocation is at the end of the page
2524 //
2525 ExpCheckPoolBlocks(Entry);
2526 if (PAGE_ALIGN(NextEntry) != NextEntry)
2527 {
2528 //
2529 // We may be able to combine the block if it's free
2530 //
2531 if (NextEntry->PoolType == 0)
2532 {
2533 //
2534 // The next block is free, so we'll do a combine
2535 //
2536 Combined = TRUE;
2537
2538 //
2539 // Make sure there's actual data in the block -- anything smaller
2540 // than this means we only have the header, so there's no linked list
2541 // for us to remove
2542 //
2543 if (NextEntry->BlockSize != 1)
2544 {
2545 //
2546 // The block is at least big enough to have a linked list, so go
2547 // ahead and remove it
2548 //
2549 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2550 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2551 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2552 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2553 }
2554
2555 //
2556 // Our entry is now combined with the next entry
2557 //
2558 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2559 }
2560 }
2561
2562 //
2563 // Now check if there was a previous entry on the same page as us
2564 //
2565 if (Entry->PreviousSize)
2566 {
2567 //
2568 // Great, grab that entry and check if it's free
2569 //
2570 NextEntry = POOL_PREV_BLOCK(Entry);
2571 if (NextEntry->PoolType == 0)
2572 {
2573 //
2574 // It is, so we can do a combine
2575 //
2576 Combined = TRUE;
2577
2578 //
2579 // Make sure there's actual data in the block -- anything smaller
2580 // than this means we only have the header, so there's no linked list
2581 // for us to remove
2582 //
2583 if (NextEntry->BlockSize != 1)
2584 {
2585 //
2586 // The block is at least big enough to have a linked list, so go
2587 // ahead and remove it
2588 //
2589 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2590 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2591 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2592 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2593 }
2594
2595 //
2596 // Combine our original block (which might've already been combined
2597 // with the next block), into the previous block
2598 //
2599 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2600
2601 //
2602 // And now we'll work with the previous block instead
2603 //
2604 Entry = NextEntry;
2605 }
2606 }
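
//
// Coalescing illustration (block counts assumed): freeing the middle
// entry below merges forward first, then backward, leaving one entry:
//
//   before:  [Free: 3][Freed: 6][Free: 2]
//   step 1:  [Free: 3][Free: 8]             (combined with next)
//   step 2:  [Free: 11]                     (absorbed into previous)
//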
2607
2608 //
2609 // By now, it may have been possible for our combined blocks to actually
2610 // have made up a full page (if there were only 2-3 allocations on the
2611 // page, they could've all been combined).
2612 //
2613 if ((PAGE_ALIGN(Entry) == Entry) &&
2614 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2615 {
2616 //
2617 // In this case, release the pool lock, update the performance counter,
2618 // and free the page
2619 //
2620 ExUnlockPool(PoolDesc, OldIrql);
2621 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2622 MiFreePoolPages(Entry);
2623 return;
2624 }
2625
2626 //
2627 // Otherwise, we now have a free block (or a combination of 2 or 3)
2628 //
2629 Entry->PoolType = 0;
2630 BlockSize = Entry->BlockSize;
2631 ASSERT(BlockSize != 1);
2632
2633 //
2634 // Check if we actually did combine it with anyone
2635 //
2636 if (Combined)
2637 {
2638 //
2639 // Get the first combined block (either our original to begin with, or
2640 // the one after the original, depending if we combined with the previous)
2641 //
2642 NextEntry = POOL_NEXT_BLOCK(Entry);
2643
2644 //
2645 // As long as the next block isn't on a page boundary, have it point
2646 // back to us
2647 //
2648 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2649 }
2650
2651 //
2652 // Insert this new free block, and release the pool lock
2653 //
2654 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2655 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2656 ExUnlockPool(PoolDesc, OldIrql);
2657 }
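
/*
 * Usage sketch (the 'Xmpl' tag and the 64-byte size are illustrative):
 * allocations should be freed with the same tag, so that the tracker
 * updates done by ExpInsertPoolTracker and ExpRemovePoolTracker balance:
 *
 *   PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 64, 'Xmpl');
 *   if (Buffer) ExFreePoolWithTag(Buffer, 'Xmpl');
 */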
2658
2659 /*
2660 * @implemented
2661 */
2662 VOID
2663 NTAPI
2664 ExFreePool(PVOID P)
2665 {
2666 //
2667 // Just free without checking for the tag
2668 //
2669 ExFreePoolWithTag(P, 0);
2670 }
2671
2672 /*
2673 * @unimplemented
2674 */
2675 SIZE_T
2676 NTAPI
2677 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2678 OUT PBOOLEAN QuotaCharged)
2679 {
2680 //
2681 // Not implemented
2682 //
2683 UNIMPLEMENTED;
2684 return 0;
2685 }
2686
2687 /*
2688 * @implemented
2689 */
2691 PVOID
2692 NTAPI
2693 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2694 IN SIZE_T NumberOfBytes)
2695 {
2696 //
2697 // Allocate the pool
2698 //
2699 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2700 }
2701
2702 /*
2703 * @implemented
2704 */
2705 PVOID
2706 NTAPI
2707 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2708 IN SIZE_T NumberOfBytes,
2709 IN ULONG Tag,
2710 IN EX_POOL_PRIORITY Priority)
2711 {
2712 PVOID Buffer;
2713
2714 //
2715 // Allocate the pool
2716 //
2717 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2718 if (Buffer == NULL)
2719 {
2720 UNIMPLEMENTED;
2721 }
2722
2723 return Buffer;
2724 }
2725
2726 /*
2727 * @implemented
2728 */
2729 PVOID
2730 NTAPI
2731 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2732 IN SIZE_T NumberOfBytes,
2733 IN ULONG Tag)
2734 {
2735 BOOLEAN Raise = TRUE;
2736 PVOID Buffer;
2737 PPOOL_HEADER Entry;
2738 NTSTATUS Status;
2739 PEPROCESS Process = PsGetCurrentProcess();
2740
2741 //
2742 // Check if we should fail instead of raising an exception
2743 //
2744 if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2745 {
2746 Raise = FALSE;
2747 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2748 }
2749
2750 //
2751 // Inject the pool quota mask
2752 //
2753 PoolType += QUOTA_POOL_MASK;
2754
2755 //
2756 // Check if we have enough space to add the quota owner process, as long as
2757 // this isn't the system process, which never gets charged quota
2758 //
2759 ASSERT(NumberOfBytes != 0);
2760 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2761 (Process != PsInitialSystemProcess))
2762 {
2763 //
2764 // Add space for our EPROCESS pointer
2765 //
2766 NumberOfBytes += sizeof(PEPROCESS);
2767 }
2768 else
2769 {
2770 //
2771 // We won't be able to store the pointer, so don't use quota for this
2772 //
2773 PoolType -= QUOTA_POOL_MASK;
2774 }
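
//
// Worked limit (x86 values assumed: PAGE_SIZE = 4096, POOL_BLOCK_SIZE = 8,
// sizeof(PVOID) = 4): requests of up to 4096 - 8 - 4 = 4084 bytes can
// absorb the trailing EPROCESS pointer and still fit in one page; larger
// requests fall through here and are made without the quota mask.
//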
2775
2776 //
2777 // Allocate the pool buffer now
2778 //
2779 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2780
2781 //
2782 // If the buffer is page-aligned, this is a large page allocation and we
2783 // won't touch it
2784 //
2785 if (PAGE_ALIGN(Buffer) != Buffer)
2786 {
2787 //
2788 // Also if special pool is enabled, and this was allocated from there,
2789 // we won't touch it either
2790 //
2791 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
2792 (MmIsSpecialPoolAddress(Buffer)))
2793 {
2794 return Buffer;
2795 }
2796
2797 //
2798 // If it wasn't actually allocated with quota charges, ignore it too
2799 //
2800 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2801
2802 //
2803 // If this is the system process, we don't charge quota, so ignore
2804 //
2805 if (Process == PsInitialSystemProcess) return Buffer;
2806
2807 //
2808 // Actually go and charge quota for the process now
2809 //
2810 Entry = POOL_ENTRY(Buffer);
2811 Status = PsChargeProcessPoolQuota(Process,
2812 PoolType & BASE_POOL_TYPE_MASK,
2813 Entry->BlockSize * POOL_BLOCK_SIZE);
2814 if (!NT_SUCCESS(Status))
2815 {
2816 //
2817 // Quota failed, back out the allocation, clear the owner, and fail
2818 //
2819 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
2820 ExFreePoolWithTag(Buffer, Tag);
2821 if (Raise) RtlRaiseStatus(Status);
2822 return NULL;
2823 }
2824
2825 //
2826 // Quota worked, write the owner and then reference it before returning
2827 //
2828 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
2829 ObReferenceObject(Process);
2830 }
2831 else if (!(Buffer) && (Raise))
2832 {
2833 //
2834 // The allocation failed, raise an error if we are in raise mode
2835 //
2836 RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2837 }
2838
2839 //
2840 // Return the allocated buffer
2841 //
2842 return Buffer;
2843 }
2844
2845 #if DBG && defined(KDBG)
2846
2847 BOOLEAN
2848 ExpKdbgExtPool(
2849 ULONG Argc,
2850 PCHAR Argv[])
2851 {
2852 ULONG_PTR Address = 0, Flags = 0;
2853 PVOID PoolPage;
2854 PPOOL_HEADER Entry;
2855 BOOLEAN ThisOne;
2856 PULONG Data;
2857
2858 if (Argc > 1)
2859 {
2860 /* Get address */
2861 if (!KdbpGetHexNumber(Argv[1], &Address))
2862 {
2863 KdbpPrint("Invalid parameter: %s\n", Argv[1]);
2864 return TRUE;
2865 }
2866 }
2867
2868 if (Argc > 2)
2869 {
2870 /* Get the flags */
2871 if (!KdbpGetHexNumber(Argv[2], &Flags))
2872 {
2873 KdbpPrint("Invalid parameter: %s\n", Argv[2]);
2874 return TRUE;
2875 }
2876 }
2877
2878 /* Check if we got an address */
2879 if (Address != 0)
2880 {
2881 /* Get the base page */
2882 PoolPage = PAGE_ALIGN(Address);
2883 }
2884 else
2885 {
2886 KdbpPrint("Heap is unimplemented\n");
2887 return TRUE;
2888 }
2889
2890 /* No paging support! */
2891 if (!MmIsAddressValid(PoolPage))
2892 {
2893 KdbpPrint("Address not accessible!\n");
2894 return TRUE;
2895 }
2896
2897 /* Get pool type */
2898 if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
2899 KdbpPrint("Allocation is from PagedPool region\n");
2900 else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
2901 KdbpPrint("Allocation is from NonPagedPool region\n");
2902 else
2903 {
2904 KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
2905 return TRUE;
2906 }
2907
2908 /* Loop all entries of that page */
2909 Entry = PoolPage;
2910 do
2911 {
2912 /* Check if the address is within that entry */
2913 ThisOne = ((Address >= (ULONG_PTR)Entry) &&
2914 (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));
2915
2916 if (!(Flags & 1) || ThisOne)
2917 {
2918 /* Print the line */
2919 KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
2920 ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
2921 (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free) "),
2922 (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
2923 }
2924
2925 if (Flags & 1)
2926 {
2927 Data = (PULONG)(Entry + 1);
2928 KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
2929 " %p %08lx %08lx %08lx %08lx\n",
2930 &Data[0], Data[0], Data[1], Data[2], Data[3],
2931 &Data[4], Data[4], Data[5], Data[6], Data[7]);
2932 }
2933
2934 /* Go to next entry */
2935 Entry = POOL_BLOCK(Entry, Entry->BlockSize);
2936 }
2937 while ((Entry->BlockSize != 0) && ((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE));
2938
2939 return TRUE;
2940 }
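
/*
 * Illustrative kdbg session for the command above (the prompt, addresses,
 * sizes and tags are hypothetical; the output format follows the
 * KdbpPrint calls in ExpKdbgExtPool):
 *
 *   kdb:> !pool 80501050
 *   Allocation is from NonPagedPool region
 *    80501000 size:    8 previous size:    0 (Allocated) Irp
 *   *80501040 size:   24 previous size:    8 (Allocated) File
 */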
2941
2942 BOOLEAN
2943 ExpKdbgExtPoolUsed(
2944 ULONG Argc,
2945 PCHAR Argv[])
2946 {
2947 ULONG Tag = 0;
2948
2949 if (Argc > 1)
2950 {
2951 CHAR Tmp[4] = {0}; /* Zero-pad tags shorter than four characters */
2952 ULONG Len;
2953
2954 /* Get the tag */
2955 Len = strlen(Argv[1]);
2956 if (Len > 4)
2957 {
2958 Len = 4;
2959 }
2960 RtlCopyMemory(Tmp, Argv[1], Len * sizeof(CHAR));
2961
2962 Tag = *((PULONG)Tmp);
2963 }
2964
2965 MiDumpPoolConsumers(TRUE, Tag);
2966
2967 return TRUE;
2968 }
2969
2970 #endif // DBG && KDBG
2971
2972 /* EOF */