1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 ULONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
56
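/*
 * Illustrative sketch (not compiled in): given a pointer P handed out to a
 * pool caller, the macros above navigate the page as follows. This assumes
 * the usual layout where every allocation is immediately preceded by its
 * POOL_HEADER and all sizes are expressed in POOL_BLOCK_SIZE units.
 */
#if 0
PPOOL_HEADER Header = POOL_ENTRY(P);          /* Header sits just below the data */
PVOID Data = POOL_FREE_BLOCK(Header);         /* ...and this recovers P again */
PPOOL_HEADER Next = POOL_NEXT_BLOCK(Header);  /* Forward by BlockSize units */
PPOOL_HEADER Prev = POOL_PREV_BLOCK(Header);  /* Back by PreviousSize units */
#endif
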
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
86
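/*
 * Why the low-bit encoding works: pool headers and the LIST_ENTRY structures
 * embedded in free blocks are at least 8-byte aligned, so bit 0 of a valid
 * pointer is always clear. Storing the links with bit 0 set means that any
 * code which uses them without ExpDecodePoolLink operates on an off-by-one
 * pointer, which the ExpCheckPoolLinks validation below catches as a
 * mismatch. A minimal sketch of the round-trip:
 */
#if 0
PLIST_ENTRY Raw = POOL_FREE_BLOCK(Entry);     /* a free block's embedded links */
PLIST_ENTRY Stored = ExpEncodePoolLink(Raw);  /* low bit now set */
ASSERT(ExpDecodePoolLink(Stored) == Raw);     /* decoding restores the pointer */
#endif
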
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210 Entry->PreviousSize, (char *)&Entry->PoolTag);
211 KeBugCheckEx(BAD_POOL_HEADER,
212 5,
213 (ULONG_PTR)PreviousEntry,
214 __LINE__,
215 (ULONG_PTR)Entry);
216 }
217 }
218 else if (PAGE_ALIGN(Entry) != Entry)
219 {
220 /* If there's no block before us, we are the first block, so we should be on a page boundary */
221 KeBugCheckEx(BAD_POOL_HEADER,
222 7,
223 0,
224 __LINE__,
225 (ULONG_PTR)Entry);
226 }
227
228 /* This block must have a size */
229 if (!Entry->BlockSize)
230 {
231 /* Someone must've corrupted this field */
232 if (Entry->PreviousSize)
233 {
234 PreviousEntry = POOL_PREV_BLOCK(Entry);
235 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236 (char *)&PreviousEntry->PoolTag,
237 (char *)&Entry->PoolTag);
238 }
239 else
240 {
241 DPRINT1("Entry tag %.4s\n",
242 (char *)&Entry->PoolTag);
243 }
244 KeBugCheckEx(BAD_POOL_HEADER,
245 8,
246 0,
247 __LINE__,
248 (ULONG_PTR)Entry);
249 }
250
251 /* Okay, now get the next block */
252 NextEntry = POOL_NEXT_BLOCK(Entry);
253
254 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255 if (PAGE_ALIGN(NextEntry) != NextEntry)
256 {
257 /* The two blocks must be on the same page! */
258 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259 {
260 /* Something is messed up */
261 KeBugCheckEx(BAD_POOL_HEADER,
262 9,
263 (ULONG_PTR)NextEntry,
264 __LINE__,
265 (ULONG_PTR)Entry);
266 }
267
268 /* And this block should think we are as large as we truly are */
269 if (NextEntry->PreviousSize != Entry->BlockSize)
270 {
271 /* Otherwise, someone corrupted the field */
272 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273 Entry->BlockSize, (char *)&Entry->PoolTag,
274 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275 KeBugCheckEx(BAD_POOL_HEADER,
276 5,
277 (ULONG_PTR)NextEntry,
278 __LINE__,
279 (ULONG_PTR)Entry);
280 }
281 }
282 }
283
284 VOID
285 NTAPI
286 ExpCheckPoolAllocation(
287 PVOID P,
288 POOL_TYPE PoolType,
289 ULONG Tag)
290 {
291 PPOOL_HEADER Entry;
292 ULONG i;
293 KIRQL OldIrql;
294 POOL_TYPE RealPoolType;
295
296 /* Get the pool header */
297 Entry = ((PPOOL_HEADER)P) - 1;
298
299 /* Check if this is a large allocation */
300 if (PAGE_ALIGN(P) == P)
301 {
302 /* Lock the pool table */
303 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
304
305 /* Find the pool tag */
306 for (i = 0; i < PoolBigPageTableSize; i++)
307 {
308 /* Check if this is our allocation */
309 if (PoolBigPageTable[i].Va == P)
310 {
311 /* Make sure the tag is ok */
312 if (PoolBigPageTable[i].Key != Tag)
313 {
314 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
315 }
316
317 break;
318 }
319 }
320
321 /* Release the lock */
322 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
323
324 if (i == PoolBigPageTableSize)
325 {
326 /* Did not find the allocation */
327 //ASSERT(FALSE);
328 }
329
330 /* Get Pool type by address */
331 RealPoolType = MmDeterminePoolType(P);
332 }
333 else
334 {
335 /* Verify the tag */
336 if (Entry->PoolTag != Tag)
337 {
338 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
339 &Tag, &Entry->PoolTag, Entry->PoolTag);
340 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
341 }
342
343 /* Check the rest of the header */
344 ExpCheckPoolHeader(Entry);
345
346 /* Get Pool type from entry */
347 RealPoolType = (Entry->PoolType - 1);
348 }
349
350 /* Should we check the pool type? */
351 if (PoolType != -1)
352 {
353 /* Verify the pool type */
354 if (RealPoolType != PoolType)
355 {
356 DPRINT1("Wrong pool type! Expected %s, got %s\n",
357 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
358 RealPoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
359 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
360 }
361 }
362 }
363
364 VOID
365 NTAPI
366 ExpCheckPoolBlocks(IN PVOID Block)
367 {
368 BOOLEAN FoundBlock = FALSE;
369 SIZE_T Size = 0;
370 PPOOL_HEADER Entry;
371
372 /* Get the first entry for this page, make sure it really is the first */
373 Entry = PAGE_ALIGN(Block);
374 ASSERT(Entry->PreviousSize == 0);
375
376 /* Now scan each entry */
377 while (TRUE)
378 {
379 /* When we actually found our block, remember this */
380 if (Entry == Block) FoundBlock = TRUE;
381
382 /* Now validate this block header */
383 ExpCheckPoolHeader(Entry);
384
385 /* And go to the next one, keeping track of our size */
386 Size += Entry->BlockSize;
387 Entry = POOL_NEXT_BLOCK(Entry);
388
389 /* If we hit the last block, stop */
390 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
391
392 /* If we hit the end of the page, stop */
393 if (PAGE_ALIGN(Entry) == Entry) break;
394 }
395
396 /* We must've found our block, and we must have hit the end of the page */
397 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
398 {
399 /* Otherwise, the blocks are messed up */
400 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
401 }
402 }
403
404 FORCEINLINE
405 VOID
406 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
407 IN SIZE_T NumberOfBytes,
408 IN PVOID Entry)
409 {
410 //
411 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
412 // be DISPATCH_LEVEL or lower for Non Paged Pool
413 //
414 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
415 (KeGetCurrentIrql() > APC_LEVEL) :
416 (KeGetCurrentIrql() > DISPATCH_LEVEL))
417 {
418 //
419 // Take the system down
420 //
421 KeBugCheckEx(BAD_POOL_CALLER,
422 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
423 KeGetCurrentIrql(),
424 PoolType,
425 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
426 }
427 }
428
429 FORCEINLINE
430 ULONG
431 ExpComputeHashForTag(IN ULONG Tag,
432 IN SIZE_T BucketMask)
433 {
434 //
435 // Compute the hash by multiplying with a large prime number and then XORing
436 // with the HIDWORD of the result.
437 //
438 // Finally, AND with the bucket mask to generate a valid index/bucket into
439 // the table
440 //
441 ULONGLONG Result = (ULONGLONG)40543 * Tag;
442 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
443 }
444
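/*
 * A worked sketch (not compiled in): with the default tracker table, the
 * usable bucket count is 2048 and BucketMask is 0x7FF, so the bucket for a
 * tag is the low dword of the 64-bit product XORed with its high dword,
 * masked down to 11 bits. 40543 is prime, which spreads the mostly-ASCII
 * tag values across the table reasonably evenly.
 */
#if 0
ULONGLONG Product = (ULONGLONG)40543 * 'looP';
ULONG Bucket = ((ULONG)Product ^ (ULONG)(Product >> 32)) & 0x7FF;
ASSERT(Bucket == ExpComputeHashForTag('looP', 0x7FF));
#endif
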
445 FORCEINLINE
446 ULONG
447 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
448 {
449 ULONG Result;
450 //
451 // Compute the hash by converting the address into a page number, and then
452 // XORing each nibble with the next one.
453 //
454 // We do *NOT* AND with the bucket mask at this point because big table expansion
455 // might happen. Therefore, the final step of the hash must be performed
456 // while holding the expansion pushlock, and this is why we call this a
457 // "partial" hash only.
458 //
459 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
460 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
461 }
462
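/*
 * A sketch of the folding, assuming 4KB pages (PAGE_SHIFT == 12): the page
 * frame number is XOR-folded onto itself in 8-bit steps so that all address
 * bits above PAGE_SHIFT influence the low byte of the result. The caller
 * performs the final AND with PoolBigPageTableHash under the table lock.
 */
#if 0
ULONG Pfn = (ULONG)((ULONG_PTR)Va >> 12);               /* page number of Va */
ULONG Partial = (Pfn >> 24) ^ (Pfn >> 16) ^ (Pfn >> 8) ^ Pfn;
ULONG Bucket = Partial & PoolBigPageTableHash;          /* final step, locked */
#endif
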
463 #if DBG
464 FORCEINLINE
465 BOOLEAN
466 ExpTagAllowPrint(CHAR Tag)
467 {
468 if ((Tag >= 'a' && Tag <= 'z') ||
469 (Tag >= 'A' && Tag <= 'Z') ||
470 Tag == ' ')
471 {
472 return TRUE;
473 }
474
475 return FALSE;
476 }
477
478 #ifdef KDBG
479 #define MiDumperPrint(dbg, fmt, ...) \
480 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
481 else DPRINT1(fmt, ##__VA_ARGS__)
482 #else
483 #define MiDumperPrint(dbg, fmt, ...) \
484 DPRINT1(fmt, ##__VA_ARGS__)
485 #endif
486
487 VOID
488 MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
489 {
490 SIZE_T i;
491 BOOLEAN Verbose;
492
493 //
494 // Only print the out-of-memory banner if not called from the debugger
495 //
496 if (!CalledFromDbg)
497 {
498 DPRINT1("---------------------\n");
499 DPRINT1("Out of memory dumper!\n");
500 }
501 #ifdef KDBG
502 else
503 {
504 KdbpPrint("Pool Used:\n");
505 }
506 #endif
507
508 //
509 // Remember whether we'll have to be verbose
510 // This is the only supported flag!
511 //
512 Verbose = BooleanFlagOn(Flags, 1);
513
514 //
515 // Print table header
516 //
517 if (Verbose)
518 {
519 MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
520 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
521 }
522 else
523 {
524 MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
525 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
526 }
527
528 //
529 // We'll extract allocations for all the tracked pools
530 //
531 for (i = 0; i < PoolTrackTableSize; ++i)
532 {
533 PPOOL_TRACKER_TABLE TableEntry;
534
535 TableEntry = &PoolTrackTable[i];
536
537 //
538 // We only care about tags which have allocated memory
539 //
540 if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
541 {
542 //
543 // If there's a tag, attempt to do a pretty print
544 // only if it matches the caller's tag, or if
545 // any tag is allowed
546 // For checking whether it matches caller's tag,
547 // use the mask to make sure not to mess with the wildcards
548 //
549 if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
550 (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
551 {
552 CHAR Tag[4];
553
554 //
555 // Extract each 'component' and check whether they are printable
556 //
557 Tag[0] = TableEntry->Key & 0xFF;
558 Tag[1] = TableEntry->Key >> 8 & 0xFF;
559 Tag[2] = TableEntry->Key >> 16 & 0xFF;
560 Tag[3] = TableEntry->Key >> 24 & 0xFF;
561
562 if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
563 {
564 //
565 // Print in direct order to make !poolused TAG usage easier
566 //
567 if (Verbose)
568 {
569 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
570 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
571 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
572 TableEntry->PagedAllocs, TableEntry->PagedFrees,
573 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
574 }
575 else
576 {
577 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
578 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
579 TableEntry->PagedAllocs, TableEntry->PagedBytes);
580 }
581 }
582 else
583 {
584 if (Verbose)
585 {
586 MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
587 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
588 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
589 TableEntry->PagedAllocs, TableEntry->PagedFrees,
590 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
591 }
592 else
593 {
594 MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
595 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
596 TableEntry->PagedAllocs, TableEntry->PagedBytes);
597 }
598 }
599 }
600 else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
601 {
602 if (Verbose)
603 {
604 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
605 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
606 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
607 TableEntry->PagedAllocs, TableEntry->PagedFrees,
608 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
609 }
610 else
611 {
612 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
613 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
614 TableEntry->PagedAllocs, TableEntry->PagedBytes);
615 }
616 }
617 }
618 }
619
620 if (!CalledFromDbg)
621 {
622 DPRINT1("---------------------\n");
623 }
624 }
625 #endif
626
627 /* PRIVATE FUNCTIONS **********************************************************/
628
629 VOID
630 NTAPI
631 INIT_SECTION
632 ExpSeedHotTags(VOID)
633 {
634 ULONG i, Key, Hash, Index;
635 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
636 ULONG TagList[] =
637 {
638 '  oI',
639 ' laH',
640 'PldM',
641 'LooP',
642 'tSbO',
643 ' prI',
644 'bdDN',
645 'LprI',
646 'pOoI',
647 ' ldM',
648 'eliF',
649 'aVMC',
650 'dSeS',
651 'CFtN',
652 'looP',
653 'rPCT',
654 'bNMC',
655 'dTeS',
656 'sFtN',
657 'TPCT',
658 'CPCT',
659 ' yeK',
660 'qSbO',
661 'mNoI',
662 'aEoI',
663 'cPCT',
664 'aFtN',
665 '0ftN',
666 'tceS',
667 'SprI',
668 'ekoT',
669 '  eS',
670 'lCbO',
671 'cScC',
672 'lFtN',
673 'cAeS',
674 'mfSF',
675 'kWcC',
676 'miSF',
677 'CdfA',
678 'EdfA',
679 'orSF',
680 'nftN',
681 'PRIU',
682 'rFpN',
683 'RFpN',
684 'aPeS',
685 'sUeS',
686 'FpcA',
687 'MpcA',
688 'cSeS',
689 'mNbO',
690 'sFpN',
691 'uLeS',
692 'DPcS',
693 'nevE',
694 'vrqR',
695 'ldaV',
696 '  pP',
697 'SdaV',
698 ' daV',
699 'LdaV',
700 'FdaV',
701 ' GIB',
702 };
703
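//
// Note on the spelling above: pool tags are stored as little-endian ULONGs,
// so a multi-character constant such as 'looP' lays out in memory as the
// bytes 'P', 'o', 'o', 'l'. These are therefore the familiar tags ("Pool",
// "Irp ", "File", "Key ", ...) written backwards.
//
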
704 //
705 // Loop all 64 hot tags
706 //
707 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
708 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
709 {
710 //
711 // Get the current tag, and compute its hash in the tracker table
712 //
713 Key = TagList[i];
714 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
715
716 //
717 // Loop all the hashes in this index/bucket
718 //
719 Index = Hash;
720 while (TRUE)
721 {
722 //
723 // Find an empty entry, and make sure this isn't the last hash that
724 // can fit.
725 //
726 // On checked builds, also make sure this is the first time we are
727 // seeding this tag.
728 //
729 ASSERT(TrackTable[Hash].Key != Key);
730 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
731 {
732 //
733 // It has been seeded, move on to the next tag
734 //
735 TrackTable[Hash].Key = Key;
736 break;
737 }
738
739 //
740 // This entry was already taken, compute the next possible hash while
741 // making sure we're not back at our initial index.
742 //
743 ASSERT(TrackTable[Hash].Key != Key);
744 Hash = (Hash + 1) & PoolTrackTableMask;
745 if (Hash == Index) break;
746 }
747 }
748 }
749
750 VOID
751 NTAPI
752 ExpRemovePoolTracker(IN ULONG Key,
753 IN SIZE_T NumberOfBytes,
754 IN POOL_TYPE PoolType)
755 {
756 ULONG Hash, Index;
757 PPOOL_TRACKER_TABLE Table, TableEntry;
758 SIZE_T TableMask, TableSize;
759
760 //
761 // Remove the PROTECTED_POOL flag which is not part of the tag
762 //
763 Key &= ~PROTECTED_POOL;
764
765 //
766 // With WinDBG you can set a tag you want to break on when an allocation is
767 // attempted
768 //
769 if (Key == PoolHitTag) DbgBreakPoint();
770
771 //
772 // Why the double indirection? Because normally this function is also used
773 // when doing session pool allocations, which has another set of tables,
774 // sizes, and masks that live in session pool. Now we don't support session
775 // pool so we only ever use the regular tables, but I'm keeping the code this
776 // way so that the day we DO support session pool, it won't require that
777 // many changes
778 //
779 Table = PoolTrackTable;
780 TableMask = PoolTrackTableMask;
781 TableSize = PoolTrackTableSize;
782 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
783
784 //
785 // Compute the hash for this key, and loop all the possible buckets
786 //
787 Hash = ExpComputeHashForTag(Key, TableMask);
788 Index = Hash;
789 while (TRUE)
790 {
791 //
792 // Have we found the entry for this tag?
793 //
794 TableEntry = &Table[Hash];
795 if (TableEntry->Key == Key)
796 {
797 //
798 // Decrement the counters depending on if this was paged or nonpaged
799 // pool
800 //
801 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
802 {
803 InterlockedIncrement(&TableEntry->NonPagedFrees);
804 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
805 -(SSIZE_T)NumberOfBytes);
806 return;
807 }
808 InterlockedIncrement(&TableEntry->PagedFrees);
809 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
810 -(SSIZE_T)NumberOfBytes);
811 return;
812 }
813
814 //
815 // We should have only ended up with an empty entry if we've reached
816 // the last bucket
817 //
818 if (!TableEntry->Key)
819 {
820 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
821 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
822 ASSERT(Hash == TableMask);
823 }
824
825 //
826 // This path is hit when we don't have an entry, and the current bucket
827 // is full, so we simply try the next one
828 //
829 Hash = (Hash + 1) & TableMask;
830 if (Hash == Index) break;
831 }
832
833 //
834 // And finally this path is hit when all the buckets are full, and we need
835 // some expansion. This path is not yet supported in ReactOS and so we'll
836 // ignore the tag
837 //
838 DPRINT1("Out of pool tag space, ignoring...\n");
839 }
840
841 VOID
842 NTAPI
843 ExpInsertPoolTracker(IN ULONG Key,
844 IN SIZE_T NumberOfBytes,
845 IN POOL_TYPE PoolType)
846 {
847 ULONG Hash, Index;
848 KIRQL OldIrql;
849 PPOOL_TRACKER_TABLE Table, TableEntry;
850 SIZE_T TableMask, TableSize;
851
852 //
853 // Remove the PROTECTED_POOL flag which is not part of the tag
854 //
855 Key &= ~PROTECTED_POOL;
856
857 //
858 // With WinDBG you can set a tag you want to break on when an allocation is
859 // attempted
860 //
861 if (Key == PoolHitTag) DbgBreakPoint();
862
863 //
864 // There is also an internal flag you can set to break on malformed tags
865 //
866 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
867
868 //
869 // ASSERT on ReactOS features not yet supported
870 //
871 ASSERT(!(PoolType & SESSION_POOL_MASK));
872 ASSERT(KeGetCurrentProcessorNumber() == 0);
873
874 //
875 // Why the double indirection? Because normally this function is also used
876 // when doing session pool allocations, which has another set of tables,
877 // sizes, and masks that live in session pool. Now we don't support session
878 // pool so we only ever use the regular tables, but I'm keeping the code this
879 // way so that the day we DO support session pool, it won't require that
880 // many changes
881 //
882 Table = PoolTrackTable;
883 TableMask = PoolTrackTableMask;
884 TableSize = PoolTrackTableSize;
885 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
886
887 //
888 // Compute the hash for this key, and loop all the possible buckets
889 //
890 Hash = ExpComputeHashForTag(Key, TableMask);
891 Index = Hash;
892 while (TRUE)
893 {
894 //
895 // Do we already have an entry for this tag?
896 //
897 TableEntry = &Table[Hash];
898 if (TableEntry->Key == Key)
899 {
900 //
901 // Increment the counters depending on if this was paged or nonpaged
902 // pool
903 //
904 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
905 {
906 InterlockedIncrement(&TableEntry->NonPagedAllocs);
907 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
908 return;
909 }
910 InterlockedIncrement(&TableEntry->PagedAllocs);
911 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
912 return;
913 }
914
915 //
916 // We don't have an entry yet, but we've found a free bucket for it
917 //
918 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
919 {
920 //
921 // We need to hold the lock while creating a new entry, since other
922 // processors might be in this code path as well
923 //
924 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
925 if (!PoolTrackTable[Hash].Key)
926 {
927 //
928 // We've won the race, so now create this entry in the bucket
929 //
930 ASSERT(Table[Hash].Key == 0);
931 PoolTrackTable[Hash].Key = Key;
932 TableEntry->Key = Key;
933 }
934 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
935
936 //
937 // Now we force the loop to run again, and we should now end up in
938 // the code path above which does the interlocked increments...
939 //
940 continue;
941 }
942
943 //
944 // This path is hit when we don't have an entry, and the current bucket
945 // is full, so we simply try the next one
946 //
947 Hash = (Hash + 1) & TableMask;
948 if (Hash == Index) break;
949 }
950
951 //
952 // And finally this path is hit when all the buckets are full, and we need
953 // some expansion. This path is not yet supported in ReactOS and so we'll
954 // ignore the tag
955 //
956 DPRINT1("Out of pool tag space, ignoring...\n");
957 }
958
959 VOID
960 NTAPI
961 INIT_SECTION
962 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
963 IN POOL_TYPE PoolType,
964 IN ULONG PoolIndex,
965 IN ULONG Threshold,
966 IN PVOID PoolLock)
967 {
968 PLIST_ENTRY NextEntry, LastEntry;
969
970 //
971 // Setup the descriptor based on the caller's request
972 //
973 PoolDescriptor->PoolType = PoolType;
974 PoolDescriptor->PoolIndex = PoolIndex;
975 PoolDescriptor->Threshold = Threshold;
976 PoolDescriptor->LockAddress = PoolLock;
977
978 //
979 // Initialize accounting data
980 //
981 PoolDescriptor->RunningAllocs = 0;
982 PoolDescriptor->RunningDeAllocs = 0;
983 PoolDescriptor->TotalPages = 0;
984 PoolDescriptor->TotalBytes = 0;
985 PoolDescriptor->TotalBigPages = 0;
986
987 //
988 // Nothing pending for now
989 //
990 PoolDescriptor->PendingFrees = NULL;
991 PoolDescriptor->PendingFreeDepth = 0;
992
993 //
994 // Loop all the descriptor's allocation lists and initialize them
995 //
996 NextEntry = PoolDescriptor->ListHeads;
997 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
998 while (NextEntry < LastEntry)
999 {
1000 ExpInitializePoolListHead(NextEntry);
1001 NextEntry++;
1002 }
1003
1004 //
1005 // Note that ReactOS does not support Session Pool Yet
1006 //
1007 ASSERT(PoolType != PagedPoolSession);
1008 }
1009
1010 VOID
1011 NTAPI
1012 INIT_SECTION
1013 InitializePool(IN POOL_TYPE PoolType,
1014 IN ULONG Threshold)
1015 {
1016 PPOOL_DESCRIPTOR Descriptor;
1017 SIZE_T TableSize;
1018 ULONG i;
1019
1020 //
1021 // Check what kind of pool this is
1022 //
1023 if (PoolType == NonPagedPool)
1024 {
1025 //
1026 // Compute the track table size and convert it from a power of two to an
1027 // actual byte size
1028 //
1029 // NOTE: On checked builds, we'll assert if the registry table size was
1030 // invalid, while on retail builds we'll just break out of the loop at
1031 // that point.
1032 //
1033 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1034 for (i = 0; i < 32; i++)
1035 {
1036 if (TableSize & 1)
1037 {
1038 ASSERT((TableSize & ~1) == 0);
1039 if (!(TableSize & ~1)) break;
1040 }
1041 TableSize >>= 1;
1042 }
1043
1044 //
1045 // If we hit bit 32, then no size was defined in the registry, so
1046 // we'll use the default size of 2048 entries.
1047 //
1048 // Otherwise, use the size from the registry, as long as it's not
1049 // smaller than 64 entries.
1050 //
1051 if (i == 32)
1052 {
1053 PoolTrackTableSize = 2048;
1054 }
1055 else
1056 {
1057 PoolTrackTableSize = max(1 << i, 64);
1058 }
1059
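//
// Worked example of the loop above: an effective table size of 0x800 shifts
// right 11 times before its single set bit reaches position 0, so the loop
// breaks with i == 11 and PoolTrackTableSize becomes max(1 << 11, 64) = 2048.
// A size with more than one set bit trips the ASSERT on checked builds.
//
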
1060 //
1061 // Loop trying with the biggest specified size first, and cut it down
1062 // by a power of two each iteration in case not enough memory exists
1063 //
1064 while (TRUE)
1065 {
1066 //
1067 // Do not allow overflow
1068 //
1069 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
1070 {
1071 PoolTrackTableSize >>= 1;
1072 continue;
1073 }
1074
1075 //
1076 // Allocate the tracker table and exit the loop if this worked
1077 //
1078 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
1079 (PoolTrackTableSize + 1) *
1080 sizeof(POOL_TRACKER_TABLE));
1081 if (PoolTrackTable) break;
1082
1083 //
1084 // Otherwise, as long as we're not down to the last bit, keep
1085 // iterating
1086 //
1087 if (PoolTrackTableSize == 1)
1088 {
1089 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1090 TableSize,
1091 0xFFFFFFFF,
1092 0xFFFFFFFF,
1093 0xFFFFFFFF);
1094 }
1095 PoolTrackTableSize >>= 1;
1096 }
1097
1098 //
1099 // Add one entry, compute the hash, and zero the table
1100 //
1101 PoolTrackTableSize++;
1102 PoolTrackTableMask = PoolTrackTableSize - 2;
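//
// Example of the arithmetic above: with the default 2048 entries, the
// increment makes PoolTrackTableSize 2049 (one spare entry at the end), so
// PoolTrackTableSize - 2 == 2047 == 0x7FF, a power-of-two-minus-one AND-mask
// covering the 2048 usable buckets.
//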
1103
1104 RtlZeroMemory(PoolTrackTable,
1105 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1106
1107 //
1108 // Finally, add the most used tags to speed up those allocations
1109 //
1110 ExpSeedHotTags();
1111
1112 //
1113 // We now do the exact same thing with the tracker table for big pages
1114 //
1115 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1116 for (i = 0; i < 32; i++)
1117 {
1118 if (TableSize & 1)
1119 {
1120 ASSERT((TableSize & ~1) == 0);
1121 if (!(TableSize & ~1)) break;
1122 }
1123 TableSize >>= 1;
1124 }
1125
1126 //
1127 // For big pages, the default tracker table is 4096 entries, while the
1128 // minimum is still 64
1129 //
1130 if (i == 32)
1131 {
1132 PoolBigPageTableSize = 4096;
1133 }
1134 else
1135 {
1136 PoolBigPageTableSize = max(1 << i, 64);
1137 }
1138
1139 //
1140 // Again, run the exact same loop we ran earlier, but this time for the
1141 // big pool tracker instead
1142 //
1143 while (TRUE)
1144 {
1145 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
1146 {
1147 PoolBigPageTableSize >>= 1;
1148 continue;
1149 }
1150
1151 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
1152 PoolBigPageTableSize *
1153 sizeof(POOL_TRACKER_BIG_PAGES));
1154 if (PoolBigPageTable) break;
1155
1156 if (PoolBigPageTableSize == 1)
1157 {
1158 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1159 TableSize,
1160 0xFFFFFFFF,
1161 0xFFFFFFFF,
1162 0xFFFFFFFF);
1163 }
1164
1165 PoolBigPageTableSize >>= 1;
1166 }
1167
1168 //
1169 // An extra entry is not needed for the big pool tracker, so just
1170 // compute the hash mask and zero the table
1171 //
1172 PoolBigPageTableHash = PoolBigPageTableSize - 1;
1173 RtlZeroMemory(PoolBigPageTable,
1174 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1175 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
1176
1177 //
1178 // During development, print this out so we can see what's happening
1179 //
1180 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1181 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1182 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1183 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1184
1185 //
1186 // Insert the generic tracker for all of big pool
1187 //
1188 ExpInsertPoolTracker('looP',
1189 ROUND_TO_PAGES(PoolBigPageTableSize *
1190 sizeof(POOL_TRACKER_BIG_PAGES)),
1191 NonPagedPool);
1192
1193 //
1194 // No support for NUMA systems at this time
1195 //
1196 ASSERT(KeNumberNodes == 1);
1197
1198 //
1199 // Initialize the tag spinlock
1200 //
1201 KeInitializeSpinLock(&ExpTaggedPoolLock);
1202
1203 //
1204 // Initialize the nonpaged pool descriptor
1205 //
1206 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
1207 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
1208 NonPagedPool,
1209 0,
1210 Threshold,
1211 NULL);
1212 }
1213 else
1214 {
1215 //
1216 // No support for NUMA systems at this time
1217 //
1218 ASSERT(KeNumberNodes == 1);
1219
1220 //
1221 // Allocate the pool descriptor
1222 //
1223 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
1224 sizeof(KGUARDED_MUTEX) +
1225 sizeof(POOL_DESCRIPTOR),
1226 'looP');
1227 if (!Descriptor)
1228 {
1229 //
1230 // This is really bad...
1231 //
1232 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1233 0,
1234 -1,
1235 -1,
1236 -1);
1237 }
1238
1239 //
1240 // Setup the vector and guarded mutex for paged pool
1241 //
1242 PoolVector[PagedPool] = Descriptor;
1243 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
1244 ExpPagedPoolDescriptor[0] = Descriptor;
1245 KeInitializeGuardedMutex(ExpPagedPoolMutex);
1246 ExInitializePoolDescriptor(Descriptor,
1247 PagedPool,
1248 0,
1249 Threshold,
1250 ExpPagedPoolMutex);
1251
1252 //
1253 // Insert the generic tracker for all of nonpaged pool
1254 //
1255 ExpInsertPoolTracker('looP',
1256 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1257 NonPagedPool);
1258 }
1259 }
1260
1261 FORCEINLINE
1262 KIRQL
1263 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1264 {
1265 //
1266 // Check if this is nonpaged pool
1267 //
1268 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1269 {
1270 //
1271 // Use the queued spin lock
1272 //
1273 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1274 }
1275 else
1276 {
1277 //
1278 // Use the guarded mutex
1279 //
1280 KeAcquireGuardedMutex(Descriptor->LockAddress);
1281 return APC_LEVEL;
1282 }
1283 }
1284
1285 FORCEINLINE
1286 VOID
1287 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1288 IN KIRQL OldIrql)
1289 {
1290 //
1291 // Check if this is nonpaged pool
1292 //
1293 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1294 {
1295 //
1296 // Use the queued spin lock
1297 //
1298 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1299 }
1300 else
1301 {
1302 //
1303 // Use the guarded mutex
1304 //
1305 KeReleaseGuardedMutex(Descriptor->LockAddress);
1306 }
1307 }
1308
1309 VOID
1310 NTAPI
1311 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1312 IN PVOID DeferredContext,
1313 IN PVOID SystemArgument1,
1314 IN PVOID SystemArgument2)
1315 {
1316 PPOOL_DPC_CONTEXT Context = DeferredContext;
1317 UNREFERENCED_PARAMETER(Dpc);
1318 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1319
1320 //
1321 // Make sure we win the race, and if we did, copy the data atomically
1322 //
1323 if (KeSignalCallDpcSynchronize(SystemArgument2))
1324 {
1325 RtlCopyMemory(Context->PoolTrackTable,
1326 PoolTrackTable,
1327 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1328
1329 //
1330 // This is here because ReactOS does not yet support expansion
1331 //
1332 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1333 }
1334
1335 //
1336 // Regardless of whether we won or not, we must now synchronize and then
1337 // decrement the barrier since this is one more processor that has completed
1338 // the callback.
1339 //
1340 KeSignalCallDpcSynchronize(SystemArgument2);
1341 KeSignalCallDpcDone(SystemArgument1);
1342 }
1343
1344 NTSTATUS
1345 NTAPI
1346 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1347 IN ULONG SystemInformationLength,
1348 IN OUT PULONG ReturnLength OPTIONAL)
1349 {
1350 ULONG TableSize, CurrentLength;
1351 ULONG EntryCount;
1352 NTSTATUS Status = STATUS_SUCCESS;
1353 PSYSTEM_POOLTAG TagEntry;
1354 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1355 POOL_DPC_CONTEXT Context;
1356 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1357
1358 //
1359 // Keep track of how much data the caller's buffer must hold
1360 //
1361 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1362
1363 //
1364 // Initialize the caller's buffer
1365 //
1366 TagEntry = &SystemInformation->TagInfo[0];
1367 SystemInformation->Count = 0;
1368
1369 //
1370 // Capture the number of entries, and the total size needed to make a copy
1371 // of the table
1372 //
1373 EntryCount = (ULONG)PoolTrackTableSize;
1374 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1375
1376 //
1377 // Allocate the "Generic DPC" temporary buffer
1378 //
1379 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1380 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1381
1382 //
1383 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1384 //
1385 Context.PoolTrackTable = Buffer;
1386 Context.PoolTrackTableSize = PoolTrackTableSize;
1387 Context.PoolTrackTableExpansion = NULL;
1388 Context.PoolTrackTableSizeExpansion = 0;
1389 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1390
1391 //
1392 // Now parse the results
1393 //
1394 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1395 {
1396 //
1397 // If the entry is empty, skip it
1398 //
1399 if (!TrackerEntry->Key) continue;
1400
1401 //
1402 // Otherwise, add one more entry to the caller's buffer, and ensure that
1403 // enough space has been allocated in it
1404 //
1405 SystemInformation->Count++;
1406 CurrentLength += sizeof(*TagEntry);
1407 if (SystemInformationLength < CurrentLength)
1408 {
1409 //
1410 // The caller's buffer is too small, so set a failure code. The
1411 // caller will know the count, as well as how much space is needed.
1412 //
1413 // We do NOT break out of the loop, because we want to keep incrementing
1414 // the Count as well as CurrentLength so that the caller can know the
1415 // final numbers
1416 //
1417 Status = STATUS_INFO_LENGTH_MISMATCH;
1418 }
1419 else
1420 {
1421 //
1422 // Small sanity check that our accounting is working correctly
1423 //
1424 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1425 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1426
1427 //
1428 // Return the data into the caller's buffer
1429 //
1430 TagEntry->TagUlong = TrackerEntry->Key;
1431 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1432 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1433 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1434 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1435 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1436 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1437 TagEntry++;
1438 }
1439 }
1440
1441 //
1442 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1443 //
1444 ExFreePoolWithTag(Buffer, 'ofnI');
1445 if (ReturnLength) *ReturnLength = CurrentLength;
1446 return Status;
1447 }
1448
1449 BOOLEAN
1450 NTAPI
1451 ExpAddTagForBigPages(IN PVOID Va,
1452 IN ULONG Key,
1453 IN ULONG NumberOfPages,
1454 IN POOL_TYPE PoolType)
1455 {
1456 ULONG Hash, i = 0;
1457 PVOID OldVa;
1458 KIRQL OldIrql;
1459 SIZE_T TableSize;
1460 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1461 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1462 ASSERT(!(PoolType & SESSION_POOL_MASK));
1463
1464 //
1465 // As the table is expandable, these values must only be read after acquiring
1466 // the lock to avoid a torn access during an expansion
1467 //
1468 Hash = ExpComputePartialHashForAddress(Va);
1469 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1470 Hash &= PoolBigPageTableHash;
1471 TableSize = PoolBigPageTableSize;
1472
1473 //
1474 // We loop from the current hash bucket to the end of the table, and then
1475 // rollover to hash bucket 0 and keep going from there. If we return back
1476 // to the beginning, then we attempt expansion at the bottom of the loop
1477 //
1478 EntryStart = Entry = &PoolBigPageTable[Hash];
1479 EntryEnd = &PoolBigPageTable[TableSize];
1480 do
1481 {
1482 //
1483 // Make sure that this is a free entry and attempt to atomically make the
1484 // entry busy now
1485 //
1486 OldVa = Entry->Va;
1487 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1488 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1489 {
1490 //
1491 // We now own this entry, write down the size and the pool tag
1492 //
1493 Entry->Key = Key;
1494 Entry->NumberOfPages = NumberOfPages;
1495
1496 //
1497 // Add one more entry to the count, and see if we're getting within
1498 // 25% of the table size, at which point we'll do an expansion now
1499 // to avoid blocking too hard later on.
1500 //
1501 // Note that we only do this if this is at least the 16th time that we
1502 // have lost the race or failed to find a free entry, which implies a
1503 // massive number of concurrent big pool allocations.
1504 //
1505 InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1506 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1507 {
1508 DPRINT("Should attempt expansion since we now have %lu entries\n",
1509 ExpPoolBigEntriesInUse);
1510 }
1511
1512 //
1513 // We have our entry, return
1514 //
1515 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1516 return TRUE;
1517 }
1518
1519 //
1520 // We don't have our entry yet, so keep trying, making the entry list
1521 // circular if we reach the last entry. We'll eventually break out of
1522 // the loop once we've rolled over and returned back to our original
1523 // hash bucket
1524 //
1525 i++;
1526 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1527 } while (Entry != EntryStart);
1528
1529 //
1530 // This means there's no free hash buckets whatsoever, so we would now have
1531 // to attempt expanding the table
1532 //
1533 DPRINT1("Big pool expansion needed, not implemented!\n");
1534 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1535 return FALSE;
1536 }
1537
1538 ULONG
1539 NTAPI
1540 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1541 OUT PULONG_PTR BigPages,
1542 IN POOL_TYPE PoolType)
1543 {
1544 BOOLEAN FirstTry = TRUE;
1545 SIZE_T TableSize;
1546 KIRQL OldIrql;
1547 ULONG PoolTag, Hash;
1548 PPOOL_TRACKER_BIG_PAGES Entry;
1549 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1550 ASSERT(!(PoolType & SESSION_POOL_MASK));
1551
1552 //
1553 // As the table is expandable, these values must only be read after acquiring
1554 // the lock to avoid a torn access during an expansion
1555 //
1556 Hash = ExpComputePartialHashForAddress(Va);
1557 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1558 Hash &= PoolBigPageTableHash;
1559 TableSize = PoolBigPageTableSize;
1560
1561 //
1562 // Loop while trying to find this big page allocation
1563 //
1564 while (PoolBigPageTable[Hash].Va != Va)
1565 {
1566 //
1567 // Increment the size until we go past the end of the table
1568 //
1569 if (++Hash >= TableSize)
1570 {
1571 //
1572 // Is this the second time we've tried?
1573 //
1574 if (!FirstTry)
1575 {
1576 //
1577 // This means it was never inserted into the pool table and it
1578 // received the special "BIG" tag -- return that and return 0
1579 // so that the code can ask Mm for the page count instead
1580 //
1581 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1582 *BigPages = 0;
1583 return ' GIB';
1584 }
1585
1586 //
1587 // The first time this happens, reset the hash index and try again
1588 //
1589 Hash = 0;
1590 FirstTry = FALSE;
1591 }
1592 }
1593
1594 //
1595 // Now capture all the information we need from the entry, since after we
1596 // release the lock, the data can change
1597 //
1598 Entry = &PoolBigPageTable[Hash];
1599 *BigPages = Entry->NumberOfPages;
1600 PoolTag = Entry->Key;
1601
1602 //
1603 // Set the free bit, and decrement the number of allocations. Finally, release
1604 // the lock and return the tag that was located
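// (Va is page-aligned, so its low bit is clear; atomically incrementing the
// pointer value sets POOL_BIG_TABLE_ENTRY_FREE without disturbing the
// address bits that identify the entry)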
1605 //
1606 InterlockedIncrement((PLONG)&Entry->Va);
1607 InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1608 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1609 return PoolTag;
1610 }
1611
1612 VOID
1613 NTAPI
1614 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1615 OUT PULONG NonPagedPoolPages,
1616 OUT PULONG PagedPoolAllocs,
1617 OUT PULONG PagedPoolFrees,
1618 OUT PULONG PagedPoolLookasideHits,
1619 OUT PULONG NonPagedPoolAllocs,
1620 OUT PULONG NonPagedPoolFrees,
1621 OUT PULONG NonPagedPoolLookasideHits)
1622 {
1623 ULONG i;
1624 PPOOL_DESCRIPTOR PoolDesc;
1625
1626 //
1627 // Assume all failures
1628 //
1629 *PagedPoolPages = 0;
1630 *PagedPoolAllocs = 0;
1631 *PagedPoolFrees = 0;
1632
1633 //
1634 // Tally up the totals for all the paged pools
1635 //
1636 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1637 {
1638 PoolDesc = ExpPagedPoolDescriptor[i];
1639 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1640 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1641 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1642 }
1643
1644 //
1645 // The first non-paged pool has a hardcoded well-known descriptor name
1646 //
1647 PoolDesc = &NonPagedPoolDescriptor;
1648 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1649 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1650 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1651
1652 //
1653 // If the system has more than one non-paged pool, copy the other descriptor
1654 // totals as well
1655 //
1656 #if 0
1657 if (ExpNumberOfNonPagedPools > 1)
1658 {
1659 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1660 {
1661 PoolDesc = ExpNonPagedPoolDescriptor[i];
1662 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1663 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1664 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1665 }
1666 }
1667 #endif
1668
1669 //
1670 // FIXME: Not yet supported
1671 //
1672 *NonPagedPoolLookasideHits += 0;
1673 *PagedPoolLookasideHits += 0;
1674 }
1675
1676 VOID
1677 NTAPI
1678 ExReturnPoolQuota(IN PVOID P)
1679 {
1680 PPOOL_HEADER Entry;
1681 POOL_TYPE PoolType;
1682 USHORT BlockSize;
1683 PEPROCESS Process;
1684
1685 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
1686 (MmIsSpecialPoolAddress(P)))
1687 {
1688 return;
1689 }
1690
1691 Entry = P;
1692 Entry--;
1693 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
1694
1695 PoolType = Entry->PoolType - 1;
1696 BlockSize = Entry->BlockSize;
1697
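//
// For quota-charged allocations, the owning EPROCESS pointer is stored in
// the last pointer-sized slot of the block, just below the start of the
// next block; the [-1] indexing below is what retrieves (and later clears)
// that slot
//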
1698 if (PoolType & QUOTA_POOL_MASK)
1699 {
1700 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1701 ASSERT(Process != NULL);
1702 if (Process)
1703 {
1704 if (Process->Pcb.Header.Type != ProcessObject)
1705 {
1706 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1707 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1708 KeBugCheckEx(BAD_POOL_CALLER,
1709 0x0D,
1710 (ULONG_PTR)P,
1711 Entry->PoolTag,
1712 (ULONG_PTR)Process);
1713 }
1714 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1715 PsReturnPoolQuota(Process,
1716 PoolType & BASE_POOL_TYPE_MASK,
1717 BlockSize * POOL_BLOCK_SIZE);
1718 ObDereferenceObject(Process);
1719 }
1720 }
1721 }
1722
1723 /* PUBLIC FUNCTIONS ***********************************************************/
1724
1725 /*
1726 * @implemented
1727 */
1728 PVOID
1729 NTAPI
1730 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1731 IN SIZE_T NumberOfBytes,
1732 IN ULONG Tag)
1733 {
1734 PPOOL_DESCRIPTOR PoolDesc;
1735 PLIST_ENTRY ListHead;
1736 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1737 KIRQL OldIrql;
1738 USHORT BlockSize, i;
1739 ULONG OriginalType;
1740 PKPRCB Prcb = KeGetCurrentPrcb();
1741 PGENERAL_LOOKASIDE LookasideList;
1742
1743 //
1744 // Some sanity checks
1745 //
1746 ASSERT(Tag != 0);
1747 ASSERT(Tag != ' GIB');
1748 ASSERT(NumberOfBytes != 0);
1749 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1750
1751 //
1752 // Not supported in ReactOS
1753 //
1754 ASSERT(!(PoolType & SESSION_POOL_MASK));
1755
1756 //
1757 // Check if verifier or special pool is enabled
1758 //
1759 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1760 {
1761 //
1762 // For verifier, we should call the verification routine
1763 //
1764 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1765 {
1766 DPRINT1("Driver Verifier is not yet supported\n");
1767 }
1768
1769 //
1770 // For special pool, we check if this is a suitable allocation and do
1771 // the special allocation if needed
1772 //
1773 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1774 {
1775 //
1776 // Check if this is a special pool allocation
1777 //
1778 if (MmUseSpecialPool(NumberOfBytes, Tag))
1779 {
1780 //
1781 // Try to allocate using special pool
1782 //
1783 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1784 if (Entry) return Entry;
1785 }
1786 }
1787 }
1788
1789 //
1790 // Get the pool type and its corresponding vector for this request
1791 //
1792 OriginalType = PoolType;
1793 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1794 PoolDesc = PoolVector[PoolType];
1795 ASSERT(PoolDesc != NULL);
1796
1797 //
1798 // Check if this is a big page allocation
1799 //
1800 if (NumberOfBytes > POOL_MAX_ALLOC)
1801 {
1802 //
1803 // Allocate pages for it
1804 //
1805 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1806 if (!Entry)
1807 {
1808 #if DBG
1809 //
1810 // Out of memory, display current consumption
1811 //
1812 MiDumpPoolConsumers(FALSE, 0, 0, 0);
1813 #endif
1814
1815 //
1816 // Must succeed pool is deprecated, but still supported. These allocation
1817 // failures must cause an immediate bugcheck
1818 //
1819 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1820 {
1821 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1822 NumberOfBytes,
1823 NonPagedPoolDescriptor.TotalPages,
1824 NonPagedPoolDescriptor.TotalBigPages,
1825 0);
1826 }
1827
1828 //
1829 // Internal debugging
1830 //
1831 ExPoolFailures++;
1832
1833 //
1834 // This flag requests printing failures, and can also further specify
1835 // breaking on failures
1836 //
1837 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1838 {
1839 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1840 NumberOfBytes,
1841 OriginalType);
1842 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1843 }
1844
1845 //
1846 // Finally, this flag requests an exception, which we are more than
1847 // happy to raise!
1848 //
1849 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1850 {
1851 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1852 }
1853
1854 return NULL;
1855 }
1856
1857 //
1858 // Increment required counters
1859 //
1860 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1861 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1862 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1863 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1864
1865 //
1866 // Add a tag for the big page allocation and switch to the generic "BIG"
1867 // tag if we failed to do so, then insert a tracker for this allocation.
1868 //
1869 if (!ExpAddTagForBigPages(Entry,
1870 Tag,
1871 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1872 OriginalType))
1873 {
1874 Tag = ' GIB';
1875 }
1876 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1877 return Entry;
1878 }
1879
1880 //
1881 // Should never request 0 bytes from the pool, but since so many drivers do
1882 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1883 //
1884 if (!NumberOfBytes) NumberOfBytes = 1;
1885
1886 //
1887 // A pool allocation is defined by its data, a linked list to connect it to
1888 // the free list (if necessary), and a pool header to store accounting info.
1889 // Calculate this size, then convert it into a block size (units of pool
1890 // headers)
1891 //
1892 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1893 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1894 // the direct allocation of pages.
1895 //
1896 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1897 / POOL_BLOCK_SIZE);
1898 ASSERT(i < POOL_LISTS_PER_PAGE);
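//
// Worked example, assuming the 32-bit values POOL_BLOCK_SIZE == 8 and
// sizeof(POOL_HEADER) == 8: a 100-byte request gives
// i = (100 + 8 + 7) / 8 = 14 blocks, i.e. 112 bytes, enough for the header
// plus the caller's 100 bytes with the remainder as rounding slack
//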
1899
1900 //
1901 // Handle lookaside list optimization for both paged and nonpaged pool
1902 //
1903 if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
1904 {
1905 //
1906 // Try popping it from the per-CPU lookaside list
1907 //
1908 LookasideList = (PoolType == PagedPool) ?
1909 Prcb->PPPagedLookasideList[i - 1].P :
1910 Prcb->PPNPagedLookasideList[i - 1].P;
1911 LookasideList->TotalAllocates++;
1912 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1913 if (!Entry)
1914 {
1915 //
1916 // We failed, try popping it from the global list
1917 //
1918 LookasideList = (PoolType == PagedPool) ?
1919 Prcb->PPPagedLookasideList[i - 1].L :
1920 Prcb->PPNPagedLookasideList[i - 1].L;
1921 LookasideList->TotalAllocates++;
1922 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1923 }
1924
1925 //
1926 // If we were able to pop it, update the accounting and return the block
1927 //
1928 if (Entry)
1929 {
1930 LookasideList->AllocateHits++;
1931
1932 //
1933 // Get the real entry, write down its pool type, and track it
1934 //
1935 Entry--;
1936 Entry->PoolType = OriginalType + 1;
1937 ExpInsertPoolTracker(Tag,
1938 Entry->BlockSize * POOL_BLOCK_SIZE,
1939 OriginalType);
1940
1941 //
1942 // Return the pool allocation
1943 //
1944 Entry->PoolTag = Tag;
1945 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1946 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1947 return POOL_FREE_BLOCK(Entry);
1948 }
1949 }
1950
1951 //
1952 // Loop in the free lists looking for a block of this size. Start with the
1953 // list optimized for this kind of size lookup
1954 //
1955 ListHead = &PoolDesc->ListHeads[i];
1956 do
1957 {
1958 //
1959 // Are there any free entries available on this list?
1960 //
1961 if (!ExpIsPoolListEmpty(ListHead))
1962 {
1963 //
1964 // Acquire the pool lock now
1965 //
1966 OldIrql = ExLockPool(PoolDesc);
1967
1968 //
1969 // And make sure the list still has entries
1970 //
1971 if (ExpIsPoolListEmpty(ListHead))
1972 {
1973 //
1974 // Someone raced us (and won) before we had a chance to acquire
1975 // the lock.
1976 //
1977 // Try again!
1978 //
1979 ExUnlockPool(PoolDesc, OldIrql);
1980 continue;
1981 }
1982
1983 //
1984 // Remove a free entry from the list
1985 // Note that due to the way we insert free blocks into multiple lists
1986 // there is a guarantee that any block on this list will either be
1987 // of the correct size, or perhaps larger.
1988 //
1989 ExpCheckPoolLinks(ListHead);
1990 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1991 ExpCheckPoolLinks(ListHead);
1992 ExpCheckPoolBlocks(Entry);
1993 ASSERT(Entry->BlockSize >= i);
1994 ASSERT(Entry->PoolType == 0);
1995
1996 //
1997 // Check if this block is larger than what we need. The block could
1998 // not possibly be smaller, due to the reason explained above (and
1999 // we would've asserted on a checked build if this was the case).
2000 //
2001 if (Entry->BlockSize != i)
2002 {
2003 //
2004 // Is there an entry before this one?
2005 //
2006 if (Entry->PreviousSize == 0)
2007 {
2008 //
2009 // There isn't anyone before us, so take the next block and
2010 // turn it into a fragment that contains the leftover data
2011 // that we don't need to satisfy the caller's request
2012 //
2013 FragmentEntry = POOL_BLOCK(Entry, i);
2014 FragmentEntry->BlockSize = Entry->BlockSize - i;
2015
2016 //
2017 // And make it point back to us
2018 //
2019 FragmentEntry->PreviousSize = i;
2020
2021 //
2022 // Now get the block that follows the new fragment and check
2023 // if it's still on the same page as us (and not at the end)
2024 //
2025 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2026 if (PAGE_ALIGN(NextEntry) != NextEntry)
2027 {
2028 //
2029 // Adjust this next block to point to our newly created
2030 // fragment block
2031 //
2032 NextEntry->PreviousSize = FragmentEntry->BlockSize;
2033 }
2034 }
2035 else
2036 {
2037 //
2038 // There is a free entry before us, which we know is smaller
2039 // so we'll make this entry the fragment instead
2040 //
2041 FragmentEntry = Entry;
2042
2043 //
2044 // And then we'll remove from it the actual size required.
2045 // Now the entry is a leftover free fragment
2046 //
2047 Entry->BlockSize -= i;
2048
2049 //
2050 // Now let's go to the block right after the shrunken fragment
2051 // and make its PreviousSize reference the new fragment entry
2052 // instead of the original free block.
2053 //
2054 // This is the entry that will actually end up holding the
2055 // allocation!
2056 //
2057 Entry = POOL_NEXT_BLOCK(Entry);
2058 Entry->PreviousSize = FragmentEntry->BlockSize;
2059
2060 //
2061 // And now let's go to the entry after that one and check if
2062 // it's still on the same page, and not at the end
2063 //
2064 NextEntry = POOL_BLOCK(Entry, i);
2065 if (PAGE_ALIGN(NextEntry) != NextEntry)
2066 {
2067 //
2068 // Make it reference the allocation entry
2069 //
2070 NextEntry->PreviousSize = i;
2071 }
2072 }
2073
2074 //
2075 // Now our (allocation) entry is the right size
2076 //
2077 Entry->BlockSize = i;
2078
2079 //
2080 // And the next entry is now the free fragment which contains
2081 // the remaining difference between how big the original entry
2082 // was, and the actual size the caller needs/requested.
2083 //
2084 FragmentEntry->PoolType = 0;
2085 BlockSize = FragmentEntry->BlockSize;
2086
2087 //
2088 // Now check if enough free bytes remained for us to have a "full"
2089 // entry: any block bigger than a single (header-sized) unit has room
2090 // for the linked list and can thus go back on a free list
2091 //
2092 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2093 if (BlockSize != 1)
2094 {
2095 //
2096 // Insert the free entry into the free list for this size
2097 //
2098 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2099 POOL_FREE_BLOCK(FragmentEntry));
2100 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2101 }
2102 }
2103
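//
// Worked example (made-up numbers, typical x86 units of 8 bytes): if the
// request needs i = 4 units and we popped a 10-unit block, a 6-unit
// fragment remains. With no previous neighbor, the fragment is carved from
// the block's tail; otherwise the original block keeps the 6 leftover
// units and the allocation moves to its tail instead, so the free piece
// stays adjacent to the block before it and remains easy to coalesce.
//
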
2104 //
2105 // We have found an entry for this allocation, so set the pool type
2106 // and release the lock since we're done
2107 //
2108 Entry->PoolType = OriginalType + 1;
2109 ExpCheckPoolBlocks(Entry);
2110 ExUnlockPool(PoolDesc, OldIrql);
2111
2112 //
2113 // Increment required counters
2114 //
2115 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2116 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2117
2118 //
2119 // Track this allocation
2120 //
2121 ExpInsertPoolTracker(Tag,
2122 Entry->BlockSize * POOL_BLOCK_SIZE,
2123 OriginalType);
2124
2125 //
2126 // Return the pool allocation
2127 //
2128 Entry->PoolTag = Tag;
2129 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2130 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2131 return POOL_FREE_BLOCK(Entry);
2132 }
2133 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2134
2135 //
2136 // There were no free entries left, so we have to allocate a new fresh page
2137 //
2138 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2139 if (!Entry)
2140 {
2141 #if DBG
2142 //
2143 // Out of memory, display current consumption
2144 //
2145 MiDumpPoolConsumers(FALSE, 0, 0, 0);
2146 #endif
2147
2148 //
2149 // Must-succeed pool is deprecated, but still supported. Allocation
2150 // failures in it must cause an immediate bugcheck
2151 //
2152 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2153 {
2154 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2155 PAGE_SIZE,
2156 NonPagedPoolDescriptor.TotalPages,
2157 NonPagedPoolDescriptor.TotalBigPages,
2158 0);
2159 }
2160
2161 //
2162 // Internal debugging
2163 //
2164 ExPoolFailures++;
2165
2166 //
2167 // This flag requests printing failures, and can also further specify
2168 // breaking on failures
2169 //
2170 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2171 {
2172 DPRINT1("EX: ExAllocatePool (%Iu, 0x%x) returning NULL\n",
2173 NumberOfBytes,
2174 OriginalType);
2175 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2176 }
2177
2178 //
2179 // Finally, this flag requests an exception, which we are more than
2180 // happy to raise!
2181 //
2182 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2183 {
2184 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2185 }
2186
2187 //
2188 // Return NULL to the caller in all other cases
2189 //
2190 return NULL;
2191 }
2192
2193 //
2194 // Setup the entry data
2195 //
2196 Entry->Ulong1 = 0;
2197 Entry->BlockSize = i;
2198 Entry->PoolType = OriginalType + 1;
2199
2200 //
2201 // This page will have two entries -- one for the allocation (which we just
2202 // created above), and one for the remaining free bytes, which we're about
2203 // to create now. The free bytes are the whole page minus what was allocated
2204 // and then converted into units of block headers.
2205 //
2206 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2207 FragmentEntry = POOL_BLOCK(Entry, i);
2208 FragmentEntry->Ulong1 = 0;
2209 FragmentEntry->BlockSize = BlockSize;
2210 FragmentEntry->PreviousSize = i;
2211
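//
// For example (typical x86 values): PAGE_SIZE / POOL_BLOCK_SIZE is
// 4096 / 8 = 512 units, so an i = 4 unit allocation leaves a 508-unit
// free fragment behind it on the fresh page
//
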
2212 //
2213 // Increment required counters
2214 //
2215 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2216 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2217
2218 //
2219 // Now check if enough free bytes remained for us to have a "full" entry:
2220 // any block bigger than a single (header-sized) unit has room for the
2221 // linked list and can thus go back on a free list
2222 //
2223 if (FragmentEntry->BlockSize != 1)
2224 {
2225 //
2226 // Excellent -- acquire the pool lock
2227 //
2228 OldIrql = ExLockPool(PoolDesc);
2229
2230 //
2231 // And insert the free entry into the free list for this block size
2232 //
2233 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2234 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2235 POOL_FREE_BLOCK(FragmentEntry));
2236 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2237
2238 //
2239 // Release the pool lock
2240 //
2241 ExpCheckPoolBlocks(Entry);
2242 ExUnlockPool(PoolDesc, OldIrql);
2243 }
2244 else
2245 {
2246 //
2247 // Simply do a sanity check
2248 //
2249 ExpCheckPoolBlocks(Entry);
2250 }
2251
2252 //
2253 // Increment performance counters and track this allocation
2254 //
2255 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2256 ExpInsertPoolTracker(Tag,
2257 Entry->BlockSize * POOL_BLOCK_SIZE,
2258 OriginalType);
2259
2260 //
2261 // And return the pool allocation
2262 //
2263 ExpCheckPoolBlocks(Entry);
2264 Entry->PoolTag = Tag;
2265 return POOL_FREE_BLOCK(Entry);
2266 }
2267
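//
// Illustrative driver-side sketch (not part of this file's build; the tag
// and size are made-up values): allocate from nonpaged pool with a tag,
// and free with the same tag so tag verification can catch mismatches.
//
//     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 128, 'tseT');
//     if (Buffer != NULL)
//     {
//         RtlZeroMemory(Buffer, 128);
//         /* ... use the buffer ... */
//         ExFreePoolWithTag(Buffer, 'tseT');
//     }
//
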
2268 /*
2269 * @implemented
2270 */
2271 PVOID
2272 NTAPI
2273 ExAllocatePool(POOL_TYPE PoolType,
2274 SIZE_T NumberOfBytes)
2275 {
2276 ULONG Tag = TAG_NONE;
2277 #if 0 && DBG
2278 PLDR_DATA_TABLE_ENTRY LdrEntry;
2279
2280 /* Use the first four letters of the driver name, or "None" if unavailable */
2281 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2282 ? MiLookupDataTableEntry(_ReturnAddress())
2283 : NULL;
2284 if (LdrEntry)
2285 {
2286 ULONG i;
2287 Tag = 0;
2288 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2289 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2290 for (; i < 4; i++)
2291 Tag = Tag >> 8 | ' ' << 24;
2292 }
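/* Worked example (hypothetical driver "ntfs.sys"): the loop shifts each
   of the first four characters in from the top, so after 'n','t','f','s'
   the tag is 0x7366746E, which reads as "ntfs" in a little-endian
   memory dump */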
2293 #endif
2294 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2295 }
2296
2297 /*
2298 * @implemented
2299 */
2300 VOID
2301 NTAPI
2302 ExFreePoolWithTag(IN PVOID P,
2303 IN ULONG TagToFree)
2304 {
2305 PPOOL_HEADER Entry, NextEntry;
2306 USHORT BlockSize;
2307 KIRQL OldIrql;
2308 POOL_TYPE PoolType;
2309 PPOOL_DESCRIPTOR PoolDesc;
2310 ULONG Tag;
2311 BOOLEAN Combined = FALSE;
2312 PFN_NUMBER PageCount, RealPageCount;
2313 PKPRCB Prcb = KeGetCurrentPrcb();
2314 PGENERAL_LOOKASIDE LookasideList;
2315 PEPROCESS Process;
2316
2317 //
2318 // Check if any of the debug flags are enabled
2319 //
2320 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2321 POOL_FLAG_CHECK_WORKERS |
2322 POOL_FLAG_CHECK_RESOURCES |
2323 POOL_FLAG_VERIFIER |
2324 POOL_FLAG_CHECK_DEADLOCK |
2325 POOL_FLAG_SPECIAL_POOL))
2326 {
2327 //
2328 // Check if special pool is enabled
2329 //
2330 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2331 {
2332 //
2333 // Check if it was allocated from a special pool
2334 //
2335 if (MmIsSpecialPoolAddress(P))
2336 {
2337 //
2338 // Was deadlock verification also enabled? We can do some extra
2339 // checks at this point
2340 //
2341 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2342 {
2343 DPRINT1("Verifier not yet supported\n");
2344 }
2345
2346 //
2347 // It is, so handle it via special pool free routine
2348 //
2349 MmFreeSpecialPool(P);
2350 return;
2351 }
2352 }
2353
2354 //
2355 // For non-big page allocations, we'll do a bunch of checks in here
2356 //
2357 if (PAGE_ALIGN(P) != P)
2358 {
2359 //
2360 // Get the entry for this pool allocation
2361 // The pointer math here may look wrong or confusing, but it is quite right
2362 //
2363 Entry = P;
2364 Entry--;
2365
2366 //
2367 // Get the pool type
2368 //
2369 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2370
2371 //
2372 // FIXME: Many other debugging checks go here
2373 //
2374 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2375 }
2376 }
2377
2378 //
2379 // Check if this is a big page allocation
2380 //
2381 if (PAGE_ALIGN(P) == P)
2382 {
2383 //
2384 // We need to find the tag for it, so first we need to find out what
2385 // kind of allocation this was (paged or nonpaged), then we can go
2386 // ahead and try finding the tag for it. Remember to get rid of the
2387 // PROTECTED_POOL tag if it's found.
2388 //
2389 // Note that if at insertion time, we failed to add the tag for a big
2390 // pool allocation, we used a special tag called 'BIG' to identify the
2391 // allocation, and we may get this tag back. In this scenario, we must
2392 // manually get the size of the allocation by actually counting through
2393 // the PFN database.
2394 //
2395 PoolType = MmDeterminePoolType(P);
2396 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2397 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2398 if (!Tag)
2399 {
2400 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2401 ASSERT(Tag == ' GIB');
2402 PageCount = 1; // We are going to lie! This might screw up accounting?
2403 }
2404 else if (Tag & PROTECTED_POOL)
2405 {
2406 Tag &= ~PROTECTED_POOL;
2407 }
2408
2409 //
2410 // Check block tag
2411 //
2412 if (TagToFree && TagToFree != Tag)
2413 {
2414 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2415 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2416 }
2417
2418 //
2419 // We have our tag and our page count, so we can go ahead and remove this
2420 // tracker now
2421 //
2422 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2423
2424 //
2425 // Check if any of the debug flags are enabled
2426 //
2427 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2428 POOL_FLAG_CHECK_WORKERS |
2429 POOL_FLAG_CHECK_RESOURCES |
2430 POOL_FLAG_CHECK_DEADLOCK))
2431 {
2432 //
2433 // Was deadlock verification also enabled? We can do some extra
2434 // checks at this point
2435 //
2436 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2437 {
2438 DPRINT1("Verifier not yet supported\n");
2439 }
2440
2441 //
2442 // FIXME: Many debugging checks go here
2443 //
2444 }
2445
2446 //
2447 // Update counters
2448 //
2449 PoolDesc = PoolVector[PoolType];
2450 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2451 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2452 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2453
2454 //
2455 // Do the real free now and update the last counter with the big page count
2456 //
2457 RealPageCount = MiFreePoolPages(P);
2458 ASSERT(RealPageCount == PageCount);
2459 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2460 -(LONG)RealPageCount);
2461 return;
2462 }
2463
2464 //
2465 // Get the entry for this pool allocation
2466 // The pointer math here may look wrong or confusing, but it is quite right
2467 //
2468 Entry = P;
2469 Entry--;
2470 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
2471
2472 //
2473 // Get the size of the entry, and its pool type, then load the descriptor
2474 // for this pool type
2475 //
2476 BlockSize = Entry->BlockSize;
2477 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2478 PoolDesc = PoolVector[PoolType];
2479
2480 //
2481 // Make sure that the IRQL makes sense
2482 //
2483 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2484
2485 //
2486 // Get the pool tag and get rid of the PROTECTED_POOL flag
2487 //
2488 Tag = Entry->PoolTag;
2489 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2490
2491 //
2492 // Check block tag
2493 //
2494 if (TagToFree && TagToFree != Tag)
2495 {
2496 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2497 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2498 }
2499
2500 //
2501 // Track the removal of this allocation
2502 //
2503 ExpRemovePoolTracker(Tag,
2504 BlockSize * POOL_BLOCK_SIZE,
2505 Entry->PoolType - 1);
2506
2507 //
2508 // Release pool quota, if any
2509 //
2510 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2511 {
2512 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2513 if (Process)
2514 {
2515 if (Process->Pcb.Header.Type != ProcessObject)
2516 {
2517 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2518 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2519 KeBugCheckEx(BAD_POOL_CALLER,
2520 0x0D,
2521 (ULONG_PTR)P,
2522 Tag,
2523 (ULONG_PTR)Process);
2524 }
2525 PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2526 ObDereferenceObject(Process);
2527 }
2528 }
2529
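//
// Layout sketch for a quota-charged (small) allocation: the owning
// EPROCESS pointer sits in the last pointer-sized slot of the block,
// which is what the [-1] indexing above reads back:
//
//     | POOL_HEADER | caller data ...  | PEPROCESS owner |
//     ^ Entry       ^ P                                  ^ POOL_NEXT_BLOCK(Entry)
//
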
2530 //
2531 // Is this allocation small enough to have come from a lookaside list?
2532 //
2533 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2534 {
2535 //
2536 // Try pushing it into the per-CPU lookaside list
2537 //
2538 LookasideList = (PoolType == PagedPool) ?
2539 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2540 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2541 LookasideList->TotalFrees++;
2542 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2543 {
2544 LookasideList->FreeHits++;
2545 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2546 return;
2547 }
2548
2549 //
2550 // We failed, try to push it into the global lookaside list
2551 //
2552 LookasideList = (PoolType == PagedPool) ?
2553 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2554 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2555 LookasideList->TotalFrees++;
2556 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2557 {
2558 LookasideList->FreeHits++;
2559 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2560 return;
2561 }
2562 }
2563
2564 //
2565 // Get the pointer to the next entry
2566 //
2567 NextEntry = POOL_BLOCK(Entry, BlockSize);
2568
2569 //
2570 // Update performance counters
2571 //
2572 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2573 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -(LONG_PTR)(BlockSize * POOL_BLOCK_SIZE));
2574
2575 //
2576 // Acquire the pool lock
2577 //
2578 OldIrql = ExLockPool(PoolDesc);
2579
2580 //
2581 // Check if the next allocation is at the end of the page
2582 //
2583 ExpCheckPoolBlocks(Entry);
2584 if (PAGE_ALIGN(NextEntry) != NextEntry)
2585 {
2586 //
2587 // We may be able to combine the block if it's free
2588 //
2589 if (NextEntry->PoolType == 0)
2590 {
2591 //
2592 // The next block is free, so we'll do a combine
2593 //
2594 Combined = TRUE;
2595
2596 //
2597 // Make sure there's actual data in the block -- anything smaller
2598 // than this means we only have the header, so there's no linked list
2599 // for us to remove
2600 //
2601 if (NextEntry->BlockSize != 1)
2602 {
2603 //
2604 // The block is at least big enough to have a linked list, so go
2605 // ahead and remove it
2606 //
2607 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2608 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2609 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2610 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2611 }
2612
2613 //
2614 // Our entry is now combined with the next entry
2615 //
2616 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2617 }
2618 }
2619
2620 //
2621 // Now check if there was a previous entry on the same page as us
2622 //
2623 if (Entry->PreviousSize)
2624 {
2625 //
2626 // Great, grab that entry and check if it's free
2627 //
2628 NextEntry = POOL_PREV_BLOCK(Entry);
2629 if (NextEntry->PoolType == 0)
2630 {
2631 //
2632 // It is, so we can do a combine
2633 //
2634 Combined = TRUE;
2635
2636 //
2637 // Make sure there's actual data in the block -- anything smaller
2638 // than this means we only have the header so there's no linked list
2639 // for us to remove
2640 //
2641 if (NextEntry->BlockSize != 1)
2642 {
2643 //
2644 // The block is at least big enough to have a linked list, so go
2645 // ahead and remove it
2646 //
2647 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2648 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2649 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2650 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2651 }
2652
2653 //
2654 // Combine our original block (which might've already been combined
2655 // with the next block), into the previous block
2656 //
2657 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2658
2659 //
2660 // And now we'll work with the previous block instead
2661 //
2662 Entry = NextEntry;
2663 }
2664 }
2665
2666 //
2667 // By now, it may have been possible for our combined blocks to actually
2668 // have made up a full page (if there were only 2-3 allocations on the
2669 // page, they could've all been combined).
2670 //
2671 if ((PAGE_ALIGN(Entry) == Entry) &&
2672 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2673 {
2674 //
2675 // In this case, release the pool lock, update the performance counter,
2676 // and free the page
2677 //
2678 ExUnlockPool(PoolDesc, OldIrql);
2679 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2680 MiFreePoolPages(Entry);
2681 return;
2682 }
2683
2684 //
2685 // Otherwise, we now have a free block (or a combination of 2 or 3)
2686 //
2687 Entry->PoolType = 0;
2688 BlockSize = Entry->BlockSize;
2689 ASSERT(BlockSize != 1);
2690
2691 //
2692 // Check if we actually did combine it with anyone
2693 //
2694 if (Combined)
2695 {
2696 //
2697 // Get the first combined block (either our original to begin with, or
2698 // the one after the original, depending if we combined with the previous)
2699 //
2700 NextEntry = POOL_NEXT_BLOCK(Entry);
2701
2702 //
2703 // As long as the next block isn't on a page boundary, have it point
2704 // back to us
2705 //
2706 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2707 }
2708
2709 //
2710 // Insert this new free block, and release the pool lock
2711 //
2712 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2713 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2714 ExUnlockPool(PoolDesc, OldIrql);
2715 }
2716
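//
// Illustrative caller-side sketch (made-up tag): passing a nonzero tag to
// ExFreePoolWithTag asks for verification; a mismatch bugchecks with
// BAD_POOL_CALLER (0x0A), as implemented above.
//
//     PVOID Buffer = ExAllocatePoolWithTag(PagedPool, 64, 'looP');
//     if (Buffer != NULL)
//     {
//         /* Freeing with any tag other than 'looP' here would bugcheck */
//         ExFreePoolWithTag(Buffer, 'looP');
//     }
//
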
2717 /*
2718 * @implemented
2719 */
2720 VOID
2721 NTAPI
2722 ExFreePool(PVOID P)
2723 {
2724 //
2725 // Just free without checking for the tag
2726 //
2727 ExFreePoolWithTag(P, 0);
2728 }
2729
2730 /*
2731 * @unimplemented
2732 */
2733 SIZE_T
2734 NTAPI
2735 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2736 OUT PBOOLEAN QuotaCharged)
2737 {
2738 //
2739 // Not implemented
2740 //
2741 UNIMPLEMENTED;
2742 return 0;
2743 }
2744
2745 /*
2746 * @implemented
2747 */
2749 PVOID
2750 NTAPI
2751 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2752 IN SIZE_T NumberOfBytes)
2753 {
2754 //
2755 // Allocate the pool
2756 //
2757 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2758 }
2759
2760 /*
2761 * @implemented
2762 */
2763 PVOID
2764 NTAPI
2765 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2766 IN SIZE_T NumberOfBytes,
2767 IN ULONG Tag,
2768 IN EX_POOL_PRIORITY Priority)
2769 {
2770 PVOID Buffer;
2771
2772 //
2773 // Allocate the pool
2774 //
2775 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2776 if (Buffer == NULL)
2777 {
2778 UNIMPLEMENTED;
2779 }
2780
2781 return Buffer;
2782 }
2783
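//
// Illustrative caller-side sketch (MY_CONTEXT and the tag are hypothetical):
// the Priority hint is accepted for API compatibility, but as the code above
// shows, a failed allocation currently just falls through unimplemented.
//
//     PVOID Buffer = ExAllocatePoolWithTagPriority(NonPagedPool,
//                                                  sizeof(MY_CONTEXT),
//                                                  'xtCM',
//                                                  LowPoolPriority);
//
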
2784 /*
2785 * @implemented
2786 */
2787 PVOID
2788 NTAPI
2789 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2790 IN SIZE_T NumberOfBytes,
2791 IN ULONG Tag)
2792 {
2793 BOOLEAN Raise = TRUE;
2794 PVOID Buffer;
2795 PPOOL_HEADER Entry;
2796 NTSTATUS Status;
2797 PEPROCESS Process = PsGetCurrentProcess();
2798
2799 //
2800 // Check if we should fail instead of raising an exception
2801 //
2802 if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2803 {
2804 Raise = FALSE;
2805 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2806 }
2807
2808 //
2809 // Inject the pool quota mask
2810 //
2811 PoolType += QUOTA_POOL_MASK;
2812
2813 //
2814 // Check if we have enough space to add the quota owner process, as long as
2815 // this isn't the system process, which never gets charged quota
2816 //
2817 ASSERT(NumberOfBytes != 0);
2818 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2819 (Process != PsInitialSystemProcess))
2820 {
2821 //
2822 // Add space for our EPROCESS pointer
2823 //
2824 NumberOfBytes += sizeof(PEPROCESS);
2825 }
2826 else
2827 {
2828 //
2829 // We won't be able to store the pointer, so don't use quota for this
2830 //
2831 PoolType -= QUOTA_POOL_MASK;
2832 }
2833
2834 //
2835 // Allocate the pool buffer now
2836 //
2837 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2838
2839 //
2840 // If the buffer is page-aligned, this is a large page allocation and we
2841 // won't touch it
2842 //
2843 if (PAGE_ALIGN(Buffer) != Buffer)
2844 {
2845 //
2846 // Also if special pool is enabled, and this was allocated from there,
2847 // we won't touch it either
2848 //
2849 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
2850 (MmIsSpecialPoolAddress(Buffer)))
2851 {
2852 return Buffer;
2853 }
2854
2855 //
2856 // If it wasn't actually allocated with quota charges, ignore it too
2857 //
2858 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2859
2860 //
2861 // If this is the system process, we don't charge quota, so ignore
2862 //
2863 if (Process == PsInitialSystemProcess) return Buffer;
2864
2865 //
2866 // Actually go and charge quota for the process now
2867 //
2868 Entry = POOL_ENTRY(Buffer);
2869 Status = PsChargeProcessPoolQuota(Process,
2870 PoolType & BASE_POOL_TYPE_MASK,
2871 Entry->BlockSize * POOL_BLOCK_SIZE);
2872 if (!NT_SUCCESS(Status))
2873 {
2874 //
2875 // Quota failed, back out the allocation, clear the owner, and fail
2876 //
2877 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
2878 ExFreePoolWithTag(Buffer, Tag);
2879 if (Raise) RtlRaiseStatus(Status);
2880 return NULL;
2881 }
2882
2883 //
2884 // Quota worked, write the owner and then reference it before returning
2885 //
2886 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
2887 ObReferenceObject(Process);
2888 }
2889 else if (!(Buffer) && (Raise))
2890 {
2891 //
2892 // The allocation failed, raise an error if we are in raise mode
2893 //
2894 RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2895 }
2896
2897 //
2898 // Return the allocated buffer
2899 //
2900 return Buffer;
2901 }
2902
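//
// Illustrative caller-side sketch (made-up tag and size): OR-ing in
// POOL_QUOTA_FAIL_INSTEAD_OF_RAISE makes exhaustion return NULL rather
// than raising, matching the Raise handling above.
//
//     PVOID Buffer = ExAllocatePoolWithQuotaTag(PagedPool |
//                                               POOL_QUOTA_FAIL_INSTEAD_OF_RAISE,
//                                               256,
//                                               'touQ');
//     if (Buffer == NULL)
//     {
//         /* Quota or pool exhausted; no exception was raised */
//     }
//
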
2903 #if DBG && defined(KDBG)
2904
2905 BOOLEAN
2906 ExpKdbgExtPool(
2907 ULONG Argc,
2908 PCHAR Argv[])
2909 {
2910 ULONG_PTR Address = 0, Flags = 0;
2911 PVOID PoolPage;
2912 PPOOL_HEADER Entry;
2913 BOOLEAN ThisOne;
2914 PULONG Data;
2915
2916 if (Argc > 1)
2917 {
2918 /* Get address */
2919 if (!KdbpGetHexNumber(Argv[1], &Address))
2920 {
2921 KdbpPrint("Invalid parameter: %s\n", Argv[1]);
2922 return TRUE;
2923 }
2924 }
2925
2926 if (Argc > 2)
2927 {
2928 /* Get flags */
2929 if (!KdbpGetHexNumber(Argv[2], &Flags))
2930 {
2931 KdbpPrint("Invalid parameter: %s\n", Argv[2]);
2932 return TRUE;
2933 }
2934 }
2935
2936 /* Check if we got an address */
2937 if (Address != 0)
2938 {
2939 /* Get the base page */
2940 PoolPage = PAGE_ALIGN(Address);
2941 }
2942 else
2943 {
2944 KdbpPrint("Heap is unimplemented\n");
2945 return TRUE;
2946 }
2947
2948 /* No paging support! */
2949 if (!MmIsAddressValid(PoolPage))
2950 {
2951 KdbpPrint("Address not accessible!\n");
2952 return TRUE;
2953 }
2954
2955 /* Get pool type */
2956 if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
2957 KdbpPrint("Allocation is from PagedPool region\n");
2958 else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
2959 KdbpPrint("Allocation is from NonPagedPool region\n");
2960 else
2961 {
2962 KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
2963 return TRUE;
2964 }
2965
2966 /* Loop all entries of that page */
2967 Entry = PoolPage;
2968 do
2969 {
2970 /* Check if the address is within that entry */
2971 ThisOne = ((Address >= (ULONG_PTR)Entry) &&
2972 (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));
2973
2974 if (!(Flags & 1) || ThisOne)
2975 {
2976 /* Print the line */
2977 KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
2978 ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
2979 (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free) "),
2980 (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
2981 }
2982
2983 if (Flags & 1)
2984 {
2985 Data = (PULONG)(Entry + 1);
2986 KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
2987 " %p %08lx %08lx %08lx %08lx\n",
2988 &Data[0], Data[0], Data[1], Data[2], Data[3],
2989 &Data[4], Data[4], Data[5], Data[6], Data[7]);
2990 }
2991
2992 /* Go to next entry */
2993 Entry = POOL_BLOCK(Entry, Entry->BlockSize);
2994 }
2995 while ((Entry->BlockSize != 0) && ((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE));
2996
2997 return TRUE;
2998 }
2999
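/*
 * Illustrative kdbg session for the extension above (command name as
 * registered in the kdbg command table; the address, sizes and tags are
 * made up). Sizes are in POOL_BLOCK_SIZE units, matching the %4d fields:
 *
 *     kdb:> pool 0x80DA1008
 *     Allocation is from NonPagedPool region
 *     *80DA1000 size:    4 previous size:    0 (Allocated) Irp
 *      80DA1020 size:    8 previous size:    4 (Free)      Mdl
 */
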
3000 static
3001 VOID
3002 ExpKdbgExtPoolUsedGetTag(PCHAR Arg, PULONG Tag, PULONG Mask)
3003 {
3004 CHAR Tmp[4] = { 0 }; /* Zeroed so unused tail bytes don't pollute the tag */
3005 ULONG Len;
3006 USHORT i;
3007
3008 /* Get the tag */
3009 Len = strlen(Arg);
3010 if (Len > 4)
3011 {
3012 Len = 4;
3013 }
3014
3015 /* Generate the mask to have wildcards support */
3016 for (i = 0; i < Len; ++i)
3017 {
3018 Tmp[i] = Arg[i];
3019 if (Tmp[i] != '?')
3020 {
3021 *Mask |= (0xFF << (i * 8));
3022 }
3023 }
3024
3025 /* Get the tag in the ulong form */
3026 *Tag = *((PULONG)Tmp);
3027 }
3028
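/*
 * Worked example (hypothetical argument "Nt?p"): Tmp becomes 'N','t','?','p',
 * *Tag reads back as 0x703F744E, and the mask skips the wildcard byte:
 * *Mask = 0xFF | 0xFF00 | 0xFF000000 = 0xFF00FFFF.
 */
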
3029 BOOLEAN
3030 ExpKdbgExtPoolUsed(
3031 ULONG Argc,
3032 PCHAR Argv[])
3033 {
3034 ULONG Tag = 0;
3035 ULONG Mask = 0;
3036 ULONG Flags = 0;
3037
3038 if (Argc > 1)
3039 {
3040 /* If we have 2+ args, easy: flags then tag */
3041 if (Argc > 2)
3042 {
3043 ExpKdbgExtPoolUsedGetTag(Argv[2], &Tag, &Mask);
3044 if (!KdbpGetHexNumber(Argv[1], &Flags))
3045 {
3046 KdbpPrint("Invalid parameter: %s\n", Argv[1]);
3047 }
3048 }
3049 else
3050 {
3051 /* Otherwise, guess whether that's flags: a lone character, or "0x" plus one hex digit */
3052 if (strlen(Argv[1]) == 1 ||
3053 (strlen(Argv[1]) == 3 && Argv[1][0] == '0' && Argv[1][1] == 'x'))
3054 {
3055 /* Fallback: if reading flags failed, assume it's a tag */
3056 if (!KdbpGetHexNumber(Argv[1], &Flags))
3057 {
3058 ExpKdbgExtPoolUsedGetTag(Argv[1], &Tag, &Mask);
3059 }
3060 }
3061 /* Or tag */
3062 else
3063 {
3064 ExpKdbgExtPoolUsedGetTag(Argv[1], &Tag, &Mask);
3065 }
3066 }
3067 }
3068
3069 /* Call the dumper */
3070 MiDumpPoolConsumers(TRUE, Tag, Mask, Flags);
3071
3072 return TRUE;
3073 }
3074
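/*
 * Illustrative invocations (command name as registered in the kdbg command
 * table; flags and tags are made-up values):
 *
 *     poolused            - dump every pool consumer
 *     poolused Ntf?       - filter on tag "Ntf" with a trailing wildcard
 *     poolused 0x1 Irp    - pass flags 0x1, then filter on tag "Irp"
 */
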
3075 #endif // DBG && KDBG
3076
3077 /* EOF */