1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 ULONG ExpBigTableExpansionFailed;
41 PPOOL_TRACKER_TABLE PoolTrackTable;
42 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
43 KSPIN_LOCK ExpTaggedPoolLock;
44 ULONG PoolHitTag;
45 BOOLEAN ExStopBadTags;
46 KSPIN_LOCK ExpLargePoolTableLock;
47 ULONG ExpPoolBigEntriesInUse;
48 ULONG ExpPoolFlags;
49 ULONG ExPoolFailures;
50 ULONGLONG MiLastPoolDumpTime;
51
52 /* Pool block/header/list access macros */
53 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
54 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
55 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
56 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
57 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
58
59 /*
60 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
61 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
62 * pool code, but only for checked builds.
63 *
64 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
65 * that these checks are done even on retail builds, due to the increasing
66 * number of kernel-mode attacks which depend on dangling list pointers and other
67 * kinds of list-based attacks.
68 *
69 * For now, I will leave these checks on all the time, but later they are likely
70 * to be DBG-only, at least until there are enough kernel-mode security attacks
71 * against ReactOS to warrant the performance hit.
72 *
73 * For now, these are not made inline, so we can get good stack traces.
74 */
75 PLIST_ENTRY
76 NTAPI
77 ExpDecodePoolLink(IN PLIST_ENTRY Link)
78 {
79 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
80 }
81
82 PLIST_ENTRY
83 NTAPI
84 ExpEncodePoolLink(IN PLIST_ENTRY Link)
85 {
86 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
87 }
88
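/*
 * Illustrative sketch (not compiled in): pool blocks are at least
 * pointer-aligned, so bit 0 of a LIST_ENTRY pointer is always clear and
 * can be borrowed as a "this link was written by the pool code" marker.
 * Any encode/decode pair therefore round-trips, and a dangling pointer
 * that never went through ExpEncodePoolLink fails the list checks below.
 * DemoList is a hypothetical local, shown only for the demonstration.
 */
#if 0
static VOID ExpDemoPoolLinkEncoding(VOID)
{
    LIST_ENTRY DemoList;
    PLIST_ENTRY DemoLink = &DemoList;

    /* Natural alignment keeps bit 0 clear... */
    ASSERT(((ULONG_PTR)DemoLink & 1) == 0);

    /* ...so encoding sets it, and decoding masks it back off */
    ASSERT((ULONG_PTR)ExpEncodePoolLink(DemoLink) & 1);
    ASSERT(ExpDecodePoolLink(ExpEncodePoolLink(DemoLink)) == DemoLink);
}
#endif
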
89 VOID
90 NTAPI
91 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
92 {
93 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
94 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
95 {
96 KeBugCheckEx(BAD_POOL_HEADER,
97 3,
98 (ULONG_PTR)ListHead,
99 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
100 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
101 }
102 }
103
104 VOID
105 NTAPI
106 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
107 {
108 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
109 }
110
111 BOOLEAN
112 NTAPI
113 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
114 {
115 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
116 }
117
118 VOID
119 NTAPI
120 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
121 {
122 PLIST_ENTRY Blink, Flink;
123 Flink = ExpDecodePoolLink(Entry->Flink);
124 Blink = ExpDecodePoolLink(Entry->Blink);
125 Flink->Blink = ExpEncodePoolLink(Blink);
126 Blink->Flink = ExpEncodePoolLink(Flink);
127 }
128
129 PLIST_ENTRY
130 NTAPI
131 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
132 {
133 PLIST_ENTRY Entry, Flink;
134 Entry = ExpDecodePoolLink(ListHead->Flink);
135 Flink = ExpDecodePoolLink(Entry->Flink);
136 ListHead->Flink = ExpEncodePoolLink(Flink);
137 Flink->Blink = ExpEncodePoolLink(ListHead);
138 return Entry;
139 }
140
141 PLIST_ENTRY
142 NTAPI
143 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
144 {
145 PLIST_ENTRY Entry, Blink;
146 Entry = ExpDecodePoolLink(ListHead->Blink);
147 Blink = ExpDecodePoolLink(Entry->Blink);
148 ListHead->Blink = ExpEncodePoolLink(Blink);
149 Blink->Flink = ExpEncodePoolLink(ListHead);
150 return Entry;
151 }
152
153 VOID
154 NTAPI
155 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
156 IN PLIST_ENTRY Entry)
157 {
158 PLIST_ENTRY Blink;
159 ExpCheckPoolLinks(ListHead);
160 Blink = ExpDecodePoolLink(ListHead->Blink);
161 Entry->Flink = ExpEncodePoolLink(ListHead);
162 Entry->Blink = ExpEncodePoolLink(Blink);
163 Blink->Flink = ExpEncodePoolLink(Entry);
164 ListHead->Blink = ExpEncodePoolLink(Entry);
165 ExpCheckPoolLinks(ListHead);
166 }
167
168 VOID
169 NTAPI
170 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
171 IN PLIST_ENTRY Entry)
172 {
173 PLIST_ENTRY Flink;
174 ExpCheckPoolLinks(ListHead);
175 Flink = ExpDecodePoolLink(ListHead->Flink);
176 Entry->Flink = ExpEncodePoolLink(Flink);
177 Entry->Blink = ExpEncodePoolLink(ListHead);
178 Flink->Blink = ExpEncodePoolLink(Entry);
179 ListHead->Flink = ExpEncodePoolLink(Entry);
180 ExpCheckPoolLinks(ListHead);
181 }
182
183 VOID
184 NTAPI
185 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
186 {
187 PPOOL_HEADER PreviousEntry, NextEntry;
188
189 /* Is there a block before this one? */
190 if (Entry->PreviousSize)
191 {
192 /* Get it */
193 PreviousEntry = POOL_PREV_BLOCK(Entry);
194
195 /* The two blocks must be on the same page! */
196 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
197 {
198 /* Something is awry */
199 KeBugCheckEx(BAD_POOL_HEADER,
200 6,
201 (ULONG_PTR)PreviousEntry,
202 __LINE__,
203 (ULONG_PTR)Entry);
204 }
205
206 /* This block should also indicate that it's as large as we think it is */
207 if (PreviousEntry->BlockSize != Entry->PreviousSize)
208 {
209 /* Otherwise, someone corrupted one of the sizes */
210 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
211 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
212 Entry->PreviousSize, (char *)&Entry->PoolTag);
213 KeBugCheckEx(BAD_POOL_HEADER,
214 5,
215 (ULONG_PTR)PreviousEntry,
216 __LINE__,
217 (ULONG_PTR)Entry);
218 }
219 }
220 else if (PAGE_ALIGN(Entry) != Entry)
221 {
222 /* If there's no block before us, we are the first block, so we should be on a page boundary */
223 KeBugCheckEx(BAD_POOL_HEADER,
224 7,
225 0,
226 __LINE__,
227 (ULONG_PTR)Entry);
228 }
229
230 /* This block must have a size */
231 if (!Entry->BlockSize)
232 {
233 /* Someone must've corrupted this field */
234 if (Entry->PreviousSize)
235 {
236 PreviousEntry = POOL_PREV_BLOCK(Entry);
237 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
238 (char *)&PreviousEntry->PoolTag,
239 (char *)&Entry->PoolTag);
240 }
241 else
242 {
243 DPRINT1("Entry tag %.4s\n",
244 (char *)&Entry->PoolTag);
245 }
246 KeBugCheckEx(BAD_POOL_HEADER,
247 8,
248 0,
249 __LINE__,
250 (ULONG_PTR)Entry);
251 }
252
253 /* Okay, now get the next block */
254 NextEntry = POOL_NEXT_BLOCK(Entry);
255
256 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
257 if (PAGE_ALIGN(NextEntry) != NextEntry)
258 {
259 /* The two blocks must be on the same page! */
260 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
261 {
262 /* Something is messed up */
263 KeBugCheckEx(BAD_POOL_HEADER,
264 9,
265 (ULONG_PTR)NextEntry,
266 __LINE__,
267 (ULONG_PTR)Entry);
268 }
269
270 /* And this block should think we are as large as we truly are */
271 if (NextEntry->PreviousSize != Entry->BlockSize)
272 {
273 /* Otherwise, someone corrupted the field */
274 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
275 Entry->BlockSize, (char *)&Entry->PoolTag,
276 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
277 KeBugCheckEx(BAD_POOL_HEADER,
278 5,
279 (ULONG_PTR)NextEntry,
280 __LINE__,
281 (ULONG_PTR)Entry);
282 }
283 }
284 }
285
286 VOID
287 NTAPI
288 ExpCheckPoolAllocation(
289 PVOID P,
290 POOL_TYPE PoolType,
291 ULONG Tag)
292 {
293 PPOOL_HEADER Entry;
294 ULONG i;
295 KIRQL OldIrql;
296 POOL_TYPE RealPoolType;
297
298 /* Get the pool header */
299 Entry = ((PPOOL_HEADER)P) - 1;
300
301 /* Check if this is a large allocation */
302 if (PAGE_ALIGN(P) == P)
303 {
304 /* Lock the pool table */
305 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
306
307 /* Find the pool tag */
308 for (i = 0; i < PoolBigPageTableSize; i++)
309 {
310 /* Check if this is our allocation */
311 if (PoolBigPageTable[i].Va == P)
312 {
313 /* Make sure the tag is ok */
314 if (PoolBigPageTable[i].Key != Tag)
315 {
316 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
317 }
318
319 break;
320 }
321 }
322
323 /* Release the lock */
324 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
325
326 if (i == PoolBigPageTableSize)
327 {
328 /* Did not find the allocation */
329 //ASSERT(FALSE);
330 }
331
332 /* Get Pool type by address */
333 RealPoolType = MmDeterminePoolType(P);
334 }
335 else
336 {
337 /* Verify the tag */
338 if (Entry->PoolTag != Tag)
339 {
340 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
341 &Tag, &Entry->PoolTag, Entry->PoolTag);
342 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
343 }
344
345 /* Check the rest of the header */
346 ExpCheckPoolHeader(Entry);
347
348 /* Get Pool type from entry */
349 RealPoolType = (Entry->PoolType - 1);
350 }
351
352 /* Should we check the pool type? */
353 if (PoolType != -1)
354 {
355 /* Verify the pool type */
356 if (RealPoolType != PoolType)
357 {
358 DPRINT1("Wrong pool type! Expected %s, got %s\n",
359 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
360 (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
361 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
362 }
363 }
364 }
365
366 VOID
367 NTAPI
368 ExpCheckPoolBlocks(IN PVOID Block)
369 {
370 BOOLEAN FoundBlock = FALSE;
371 SIZE_T Size = 0;
372 PPOOL_HEADER Entry;
373
374 /* Get the first entry for this page, make sure it really is the first */
375 Entry = PAGE_ALIGN(Block);
376 ASSERT(Entry->PreviousSize == 0);
377
378 /* Now scan each entry */
379 while (TRUE)
380 {
381 /* When we actually found our block, remember this */
382 if (Entry == Block) FoundBlock = TRUE;
383
384 /* Now validate this block header */
385 ExpCheckPoolHeader(Entry);
386
387 /* And go to the next one, keeping track of our size */
388 Size += Entry->BlockSize;
389 Entry = POOL_NEXT_BLOCK(Entry);
390
391 /* If we hit the last block, stop */
392 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
393
394 /* If we hit the end of the page, stop */
395 if (PAGE_ALIGN(Entry) == Entry) break;
396 }
397
398 /* We must've found our block, and we must have hit the end of the page */
399 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
400 {
401 /* Otherwise, the blocks are messed up */
402 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
403 }
404 }
405
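/*
 * Illustrative note: BlockSize and PreviousSize are expressed in
 * POOL_BLOCK_SIZE units, so the walk above terminates once the summed
 * sizes cover a whole page. A minimal sketch, assuming the 32-bit x86
 * values of a 4096-byte page and 8-byte pool blocks:
 */
#if 0
static VOID ExpDemoBlockUnitsPerPage(VOID)
{
    /* 4096 / 8 = 512 block units fit on one page on x86 builds */
    ASSERT((PAGE_SIZE / POOL_BLOCK_SIZE) == 512);
}
#endif
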
406 FORCEINLINE
407 VOID
408 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
409 IN SIZE_T NumberOfBytes,
410 IN PVOID Entry)
411 {
412 //
413 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
414 // be DISPATCH_LEVEL or lower for Non Paged Pool
415 //
416 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
417 (KeGetCurrentIrql() > APC_LEVEL) :
418 (KeGetCurrentIrql() > DISPATCH_LEVEL))
419 {
420 //
421 // Take the system down
422 //
423 KeBugCheckEx(BAD_POOL_CALLER,
424 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
425 KeGetCurrentIrql(),
426 PoolType,
427 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
428 }
429 }
430
431 FORCEINLINE
432 ULONG
433 ExpComputeHashForTag(IN ULONG Tag,
434 IN SIZE_T BucketMask)
435 {
436 //
437 // Compute the hash by multiplying with a large prime number and then XORing
438 // with the HIDWORD of the result.
439 //
440 // Finally, AND with the bucket mask to generate a valid index/bucket into
441 // the table
442 //
443 ULONGLONG Result = (ULONGLONG)40543 * Tag;
444 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
445 }
446
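/*
 * Usage sketch (illustrative): the multiplicative hash always lands
 * inside the table thanks to the final AND, provided the mask is one
 * less than a power-of-two bucket count. The mask below matches the
 * default 2048-entry tracker table set up by InitializePool.
 */
#if 0
static VOID ExpDemoTagHash(VOID)
{
    SIZE_T DemoMask = 2048 - 1;
    ULONG DemoHash = ExpComputeHashForTag('enoN', DemoMask);

    /* The AND with the mask guarantees a valid bucket index */
    ASSERT(DemoHash <= DemoMask);
}
#endif
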
447 FORCEINLINE
448 ULONG
449 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
450 {
451 ULONG Result;
452 //
453 // Compute the hash by converting the address into a page number, and then
454 // folding that page number onto itself by XORing its shifted bytes together.
455 //
456 // We do *NOT* AND with the bucket mask at this point because big table expansion
457 // might happen. Therefore, the final step of the hash must be performed
458 // while holding the table spinlock, and this is why we call this a
459 // "partial" hash only.
460 //
461 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
462 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
463 }
464
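/*
 * Usage sketch (illustrative, assuming 4KB pages): the hash is only
 * "partial" because the table mask can change under an expansion, so
 * the final AND with PoolBigPageTableHash happens later, while the
 * table spinlock is held. DemoVa is a hypothetical allocation address.
 */
#if 0
static VOID ExpDemoPartialAddressHash(VOID)
{
    PVOID DemoVa = (PVOID)0x80123000;

    /* The shift discards the byte offset and keeps the page number */
    ASSERT(((ULONG_PTR)DemoVa >> PAGE_SHIFT) == 0x80123);

    /* The deferred AND then picks a bucket, e.g. for a 4096-entry table */
    ASSERT((ExpComputePartialHashForAddress(DemoVa) & (4096 - 1)) < 4096);
}
#endif
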
465 #if DBG
466 /*
467 * FORCEINLINE
468 * BOOLEAN
469 * ExpTagAllowPrint(CHAR Tag);
470 */
471 #define ExpTagAllowPrint(Tag) \
472 ((Tag) >= 0x20 /* Space */ && (Tag) <= 0x7E /* Tilde */)
473
474 #ifdef KDBG
475 #define MiDumperPrint(dbg, fmt, ...) \
476 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
477 else DPRINT1(fmt, ##__VA_ARGS__)
478 #else
479 #define MiDumperPrint(dbg, fmt, ...) \
480 DPRINT1(fmt, ##__VA_ARGS__)
481 #endif
482
483 VOID
484 MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
485 {
486 SIZE_T i;
487 BOOLEAN Verbose;
488
489 //
490 // Only print header if called from OOM situation
491 //
492 if (!CalledFromDbg)
493 {
494 DPRINT1("---------------------\n");
495 DPRINT1("Out of memory dumper!\n");
496 }
497 #ifdef KDBG
498 else
499 {
500 KdbpPrint("Pool Used:\n");
501 }
502 #endif
503
504 //
505 // Remember whether we'll have to be verbose
506 // This is the only supported flag!
507 //
508 Verbose = BooleanFlagOn(Flags, 1);
509
510 //
511 // Print table header
512 //
513 if (Verbose)
514 {
515 MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
516 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
517 }
518 else
519 {
520 MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
521 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
522 }
523
524 //
525 // We'll extract allocations for all the tracked pools
526 //
527 for (i = 0; i < PoolTrackTableSize; ++i)
528 {
529 PPOOL_TRACKER_TABLE TableEntry;
530
531 TableEntry = &PoolTrackTable[i];
532
533 //
534 // We only care about tags which have allocated memory
535 //
536 if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
537 {
538 //
539 // If there's a tag, attempt to do a pretty print
540 // only if it matches the caller's tag, or if
541 // any tag is allowed
542 // For checking whether it matches caller's tag,
543 // use the mask to make sure not to mess with the wildcards
544 //
545 if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
546 (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
547 {
548 CHAR Tag[4];
549
550 //
551 // Extract each 'component' and check whether they are printable
552 //
553 Tag[0] = TableEntry->Key & 0xFF;
554 Tag[1] = TableEntry->Key >> 8 & 0xFF;
555 Tag[2] = TableEntry->Key >> 16 & 0xFF;
556 Tag[3] = TableEntry->Key >> 24 & 0xFF;
557
558 if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
559 {
560 //
561 // Print in direct order to make !poolused TAG usage easier
562 //
563 if (Verbose)
564 {
565 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
566 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
567 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
568 TableEntry->PagedAllocs, TableEntry->PagedFrees,
569 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
570 }
571 else
572 {
573 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
574 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
575 TableEntry->PagedAllocs, TableEntry->PagedBytes);
576 }
577 }
578 else
579 {
580 if (Verbose)
581 {
582 MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
583 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
584 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
585 TableEntry->PagedAllocs, TableEntry->PagedFrees,
586 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
587 }
588 else
589 {
590 MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
591 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
592 TableEntry->PagedAllocs, TableEntry->PagedBytes);
593 }
594 }
595 }
596 else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
597 {
598 if (Verbose)
599 {
600 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
601 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
602 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
603 TableEntry->PagedAllocs, TableEntry->PagedFrees,
604 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
605 }
606 else
607 {
608 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
609 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
610 TableEntry->PagedAllocs, TableEntry->PagedBytes);
611 }
612 }
613 }
614 }
615
616 if (!CalledFromDbg)
617 {
618 DPRINT1("---------------------\n");
619 }
620 }
621 #endif
622
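/*
 * Usage sketch (illustrative, DBG builds only): Flags bit 0 is the only
 * supported switch and selects the verbose (allocs/frees/diff) columns;
 * the Tag/Mask pair filters entries, with Mask 0xFFFFFFFF requesting an
 * exact tag match.
 */
#if 0
static VOID ExpDemoDumpPoolConsumers(VOID)
{
    MiDumpPoolConsumers(TRUE, 0, 0, 1);              /* every tag, verbose */
    MiDumpPoolConsumers(TRUE, ' oI', 0xFFFFFFFF, 0); /* only the "Io  " tag */
}
#endif
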
623 /* PRIVATE FUNCTIONS **********************************************************/
624
625 INIT_FUNCTION
626 VOID
627 NTAPI
628 ExpSeedHotTags(VOID)
629 {
630 ULONG i, Key, Hash, Index;
631 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
632 ULONG TagList[] =
633 {
634 ' oI',
635 ' laH',
636 'PldM',
637 'LooP',
638 'tSbO',
639 ' prI',
640 'bdDN',
641 'LprI',
642 'pOoI',
643 ' ldM',
644 'eliF',
645 'aVMC',
646 'dSeS',
647 'CFtN',
648 'looP',
649 'rPCT',
650 'bNMC',
651 'dTeS',
652 'sFtN',
653 'TPCT',
654 'CPCT',
655 ' yeK',
656 'qSbO',
657 'mNoI',
658 'aEoI',
659 'cPCT',
660 'aFtN',
661 '0ftN',
662 'tceS',
663 'SprI',
664 'ekoT',
665 '  eS',
666 'lCbO',
667 'cScC',
668 'lFtN',
669 'cAeS',
670 'mfSF',
671 'kWcC',
672 'miSF',
673 'CdfA',
674 'EdfA',
675 'orSF',
676 'nftN',
677 'PRIU',
678 'rFpN',
679 'RFpN',
680 'aPeS',
681 'sUeS',
682 'FpcA',
683 'MpcA',
684 'cSeS',
685 'mNbO',
686 'sFpN',
687 'uLeS',
688 'DPcS',
689 'nevE',
690 'vrqR',
691 'ldaV',
692 '  pP',
693 'SdaV',
694 ' daV',
695 'LdaV',
696 'FdaV',
697 ' GIB',
698 };
699
700 //
701 // Loop all 64 hot tags
702 //
703 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
704 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
705 {
706 //
707 // Get the current tag, and compute its hash in the tracker table
708 //
709 Key = TagList[i];
710 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
711
712 //
713 // Loop all the hashes in this index/bucket
714 //
715 Index = Hash;
716 while (TRUE)
717 {
718 //
719 // Find an empty entry, and make sure this isn't the last hash that
720 // can fit.
721 //
722 // On checked builds, also make sure this is the first time we are
723 // seeding this tag.
724 //
725 ASSERT(TrackTable[Hash].Key != Key);
726 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
727 {
728 //
729 // It has been seeded, move on to the next tag
730 //
731 TrackTable[Hash].Key = Key;
732 break;
733 }
734
735 //
736 // This entry was already taken, compute the next possible hash while
737 // making sure we're not back at our initial index.
738 //
739 ASSERT(TrackTable[Hash].Key != Key);
740 Hash = (Hash + 1) & PoolTrackTableMask;
741 if (Hash == Index) break;
742 }
743 }
744 }
745
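/*
 * Illustrative note: the constants above look reversed because multi-
 * character literals are stored little-endian; 'pOoI' is the pool tag
 * "IoOp" when viewed as bytes in memory. A minimal sketch:
 */
#if 0
static VOID ExpDemoTagByteOrder(VOID)
{
    ULONG DemoKey = 'pOoI';
    PUCHAR Bytes = (PUCHAR)&DemoKey;

    /* On a little-endian build the lowest byte is the first character */
    ASSERT(Bytes[0] == 'I');
    ASSERT(Bytes[3] == 'p');
}
#endif
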
746 VOID
747 NTAPI
748 ExpRemovePoolTracker(IN ULONG Key,
749 IN SIZE_T NumberOfBytes,
750 IN POOL_TYPE PoolType)
751 {
752 ULONG Hash, Index;
753 PPOOL_TRACKER_TABLE Table, TableEntry;
754 SIZE_T TableMask, TableSize;
755
756 //
757 // Remove the PROTECTED_POOL flag which is not part of the tag
758 //
759 Key &= ~PROTECTED_POOL;
760
761 //
762 // With WinDBG you can set a tag you want to break on when an allocation is
763 // attempted
764 //
765 if (Key == PoolHitTag) DbgBreakPoint();
766
767 //
768 // Why the double indirection? Because normally this function is also used
769 // when doing session pool allocations, which has another set of tables,
770 // sizes, and masks that live in session pool. Now we don't support session
771 // pool so we only ever use the regular tables, but I'm keeping the code this
772 // way so that the day we DO support session pool, it won't require that
773 // many changes
774 //
775 Table = PoolTrackTable;
776 TableMask = PoolTrackTableMask;
777 TableSize = PoolTrackTableSize;
778 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
779
780 //
781 // Compute the hash for this key, and loop all the possible buckets
782 //
783 Hash = ExpComputeHashForTag(Key, TableMask);
784 Index = Hash;
785 while (TRUE)
786 {
787 //
788 // Have we found the entry for this tag?
789 //
790 TableEntry = &Table[Hash];
791 if (TableEntry->Key == Key)
792 {
793 //
794 // Decrement the counters depending on if this was paged or nonpaged
795 // pool
796 //
797 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
798 {
799 InterlockedIncrement(&TableEntry->NonPagedFrees);
800 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
801 -(SSIZE_T)NumberOfBytes);
802 return;
803 }
804 InterlockedIncrement(&TableEntry->PagedFrees);
805 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
806 -(SSIZE_T)NumberOfBytes);
807 return;
808 }
809
810 //
811 // We should have only ended up with an empty entry if we've reached
812 // the last bucket
813 //
814 if (!TableEntry->Key)
815 {
816 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
817 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
818 ASSERT(Hash == TableMask);
819 }
820
821 //
822 // This path is hit when we don't have an entry, and the current bucket
823 // is full, so we simply try the next one
824 //
825 Hash = (Hash + 1) & TableMask;
826 if (Hash == Index) break;
827 }
828
829 //
830 // And finally this path is hit when all the buckets are full, and we need
831 // some expansion. This path is not yet supported in ReactOS and so we'll
832 // ignore the tag
833 //
834 DPRINT1("Out of pool tag space, ignoring...\n");
835 }
836
837 VOID
838 NTAPI
839 ExpInsertPoolTracker(IN ULONG Key,
840 IN SIZE_T NumberOfBytes,
841 IN POOL_TYPE PoolType)
842 {
843 ULONG Hash, Index;
844 KIRQL OldIrql;
845 PPOOL_TRACKER_TABLE Table, TableEntry;
846 SIZE_T TableMask, TableSize;
847
848 //
849 // Remove the PROTECTED_POOL flag which is not part of the tag
850 //
851 Key &= ~PROTECTED_POOL;
852
853 //
854 // With WinDBG you can set a tag you want to break on when an allocation is
855 // attempted
856 //
857 if (Key == PoolHitTag) DbgBreakPoint();
858
859 //
860 // There is also an internal flag you can set to break on malformed tags
861 //
862 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
863
864 //
865 // ASSERT on ReactOS features not yet supported
866 //
867 ASSERT(!(PoolType & SESSION_POOL_MASK));
868 ASSERT(KeGetCurrentProcessorNumber() == 0);
869
870 //
871 // Why the double indirection? Because normally this function is also used
872 // when doing session pool allocations, which has another set of tables,
873 // sizes, and masks that live in session pool. Now we don't support session
874 // pool so we only ever use the regular tables, but I'm keeping the code this
875 // way so that the day we DO support session pool, it won't require that
876 // many changes
877 //
878 Table = PoolTrackTable;
879 TableMask = PoolTrackTableMask;
880 TableSize = PoolTrackTableSize;
881 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
882
883 //
884 // Compute the hash for this key, and loop all the possible buckets
885 //
886 Hash = ExpComputeHashForTag(Key, TableMask);
887 Index = Hash;
888 while (TRUE)
889 {
890 //
891 // Do we already have an entry for this tag?
892 //
893 TableEntry = &Table[Hash];
894 if (TableEntry->Key == Key)
895 {
896 //
897 // Increment the counters depending on if this was paged or nonpaged
898 // pool
899 //
900 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
901 {
902 InterlockedIncrement(&TableEntry->NonPagedAllocs);
903 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
904 return;
905 }
906 InterlockedIncrement(&TableEntry->PagedAllocs);
907 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
908 return;
909 }
910
911 //
912 // We don't have an entry yet, but we've found a free bucket for it
913 //
914 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
915 {
916 //
917 // We need to hold the lock while creating a new entry, since other
918 // processors might be in this code path as well
919 //
920 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
921 if (!PoolTrackTable[Hash].Key)
922 {
923 //
924 // We've won the race, so now create this entry in the bucket
925 //
926 ASSERT(Table[Hash].Key == 0);
927 PoolTrackTable[Hash].Key = Key;
928 TableEntry->Key = Key;
929 }
930 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
931
932 //
933 // Now we force the loop to run again, and we should now end up in
934 // the code path above which does the interlocked increments...
935 //
936 continue;
937 }
938
939 //
940 // This path is hit when we don't have an entry, and the current bucket
941 // is full, so we simply try the next one
942 //
943 Hash = (Hash + 1) & TableMask;
944 if (Hash == Index) break;
945 }
946
947 //
948 // And finally this path is hit when all the buckets are full, and we need
949 // some expansion. This path is not yet supported in ReactOS and so we'll
950 // ignore the tag
951 //
952 DPRINT1("Out of pool tag space, ignoring...\n");
953 }
954
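/*
 * Usage sketch (illustrative): each tracked allocation pairs one insert
 * at allocation time with one remove at free time, using the same tag,
 * byte count and pool type, so the per-tag counters balance out. 'tseT'
 * is a hypothetical tag.
 */
#if 0
static VOID ExpDemoTrackerPairing(VOID)
{
    ExpInsertPoolTracker('tseT', PAGE_SIZE, NonPagedPool);
    /* ...the allocation is live here... */
    ExpRemovePoolTracker('tseT', PAGE_SIZE, NonPagedPool);
}
#endif
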
955 INIT_FUNCTION
956 VOID
957 NTAPI
958 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
959 IN POOL_TYPE PoolType,
960 IN ULONG PoolIndex,
961 IN ULONG Threshold,
962 IN PVOID PoolLock)
963 {
964 PLIST_ENTRY NextEntry, LastEntry;
965
966 //
967 // Setup the descriptor based on the caller's request
968 //
969 PoolDescriptor->PoolType = PoolType;
970 PoolDescriptor->PoolIndex = PoolIndex;
971 PoolDescriptor->Threshold = Threshold;
972 PoolDescriptor->LockAddress = PoolLock;
973
974 //
975 // Initialize accounting data
976 //
977 PoolDescriptor->RunningAllocs = 0;
978 PoolDescriptor->RunningDeAllocs = 0;
979 PoolDescriptor->TotalPages = 0;
980 PoolDescriptor->TotalBytes = 0;
981 PoolDescriptor->TotalBigPages = 0;
982
983 //
984 // Nothing pending for now
985 //
986 PoolDescriptor->PendingFrees = NULL;
987 PoolDescriptor->PendingFreeDepth = 0;
988
989 //
990 // Loop all the descriptor's allocation lists and initialize them
991 //
992 NextEntry = PoolDescriptor->ListHeads;
993 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
994 while (NextEntry < LastEntry)
995 {
996 ExpInitializePoolListHead(NextEntry);
997 NextEntry++;
998 }
999
1000 //
1001 // Note that ReactOS does not support Session Pool Yet
1002 //
1003 ASSERT(PoolType != PagedPoolSession);
1004 }
1005
1006 INIT_FUNCTION
1007 VOID
1008 NTAPI
1009 InitializePool(IN POOL_TYPE PoolType,
1010 IN ULONG Threshold)
1011 {
1012 PPOOL_DESCRIPTOR Descriptor;
1013 SIZE_T TableSize;
1014 ULONG i;
1015
1016 //
1017 // Check what kind of pool this is
1018 //
1019 if (PoolType == NonPagedPool)
1020 {
1021 //
1022 // Compute the track table size and convert it from a power of two to an
1023 // actual byte size
1024 //
1025 // NOTE: On checked builds, we'll assert if the registry table size was
1026 // invalid, while on retail builds we'll just break out of the loop at
1027 // that point.
1028 //
1029 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1030 for (i = 0; i < 32; i++)
1031 {
1032 if (TableSize & 1)
1033 {
1034 ASSERT((TableSize & ~1) == 0);
1035 if (!(TableSize & ~1)) break;
1036 }
1037 TableSize >>= 1;
1038 }
1039
1040 //
1041 // If we hit bit 32, then no size was defined in the registry, so
1042 // we'll use the default size of 2048 entries.
1043 //
1044 // Otherwise, use the size from the registry, as long as it's not
1045 // smaller than 64 entries.
1046 //
1047 if (i == 32)
1048 {
1049 PoolTrackTableSize = 2048;
1050 }
1051 else
1052 {
1053 PoolTrackTableSize = max(1 << i, 64);
1054 }
1055
1056 //
1057 // Loop trying with the biggest specified size first, and cut it down
1058 // by a power of two each iteration in case not enough memory exists
1059 //
1060 while (TRUE)
1061 {
1062 //
1063 // Do not allow overflow
1064 //
1065 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
1066 {
1067 PoolTrackTableSize >>= 1;
1068 continue;
1069 }
1070
1071 //
1072 // Allocate the tracker table and exit the loop if this worked
1073 //
1074 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
1075 (PoolTrackTableSize + 1) *
1076 sizeof(POOL_TRACKER_TABLE));
1077 if (PoolTrackTable) break;
1078
1079 //
1080 // Otherwise, as long as we're not down to the last bit, keep
1081 // iterating
1082 //
1083 if (PoolTrackTableSize == 1)
1084 {
1085 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1086 TableSize,
1087 0xFFFFFFFF,
1088 0xFFFFFFFF,
1089 0xFFFFFFFF);
1090 }
1091 PoolTrackTableSize >>= 1;
1092 }
1093
1094 //
1095 // Add one entry, compute the hash, and zero the table
1096 //
1097 PoolTrackTableSize++;
1098 PoolTrackTableMask = PoolTrackTableSize - 2;
1099
1100 RtlZeroMemory(PoolTrackTable,
1101 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1102
1103 //
1104 // Finally, add the most used tags to speed up those allocations
1105 //
1106 ExpSeedHotTags();
1107
1108 //
1109 // We now do the exact same thing with the tracker table for big pages
1110 //
1111 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1112 for (i = 0; i < 32; i++)
1113 {
1114 if (TableSize & 1)
1115 {
1116 ASSERT((TableSize & ~1) == 0);
1117 if (!(TableSize & ~1)) break;
1118 }
1119 TableSize >>= 1;
1120 }
1121
1122 //
1123 // For big pages, the default tracker table is 4096 entries, while the
1124 // minimum is still 64
1125 //
1126 if (i == 32)
1127 {
1128 PoolBigPageTableSize = 4096;
1129 }
1130 else
1131 {
1132 PoolBigPageTableSize = max(1 << i, 64);
1133 }
1134
1135 //
1136 // Again, run the exact same loop we ran earlier, but this time for the
1137 // big pool tracker instead
1138 //
1139 while (TRUE)
1140 {
1141 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
1142 {
1143 PoolBigPageTableSize >>= 1;
1144 continue;
1145 }
1146
1147 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
1148 PoolBigPageTableSize *
1149 sizeof(POOL_TRACKER_BIG_PAGES));
1150 if (PoolBigPageTable) break;
1151
1152 if (PoolBigPageTableSize == 1)
1153 {
1154 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1155 TableSize,
1156 0xFFFFFFFF,
1157 0xFFFFFFFF,
1158 0xFFFFFFFF);
1159 }
1160
1161 PoolBigPageTableSize >>= 1;
1162 }
1163
1164 //
1165 // An extra entry is not needed for the big pool tracker, so just
1166 // compute the hash and zero it
1167 //
1168 PoolBigPageTableHash = PoolBigPageTableSize - 1;
1169 RtlZeroMemory(PoolBigPageTable,
1170 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1171 for (i = 0; i < PoolBigPageTableSize; i++)
1172 {
1173 PoolBigPageTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
1174 }
1175
1176 //
1177 // During development, print this out so we can see what's happening
1178 //
1179 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1180 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1181 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1182 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1183
1184 //
1185 // Insert the generic tracker for all of big pool
1186 //
1187 ExpInsertPoolTracker('looP',
1188 ROUND_TO_PAGES(PoolBigPageTableSize *
1189 sizeof(POOL_TRACKER_BIG_PAGES)),
1190 NonPagedPool);
1191
1192 //
1193 // No support for NUMA systems at this time
1194 //
1195 ASSERT(KeNumberNodes == 1);
1196
1197 //
1198 // Initialize the tag spinlock
1199 //
1200 KeInitializeSpinLock(&ExpTaggedPoolLock);
1201
1202 //
1203 // Initialize the nonpaged pool descriptor
1204 //
1205 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
1206 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
1207 NonPagedPool,
1208 0,
1209 Threshold,
1210 NULL);
1211 }
1212 else
1213 {
1214 //
1215 // No support for NUMA systems at this time
1216 //
1217 ASSERT(KeNumberNodes == 1);
1218
1219 //
1220 // Allocate the pool descriptor
1221 //
1222 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
1223 sizeof(KGUARDED_MUTEX) +
1224 sizeof(POOL_DESCRIPTOR),
1225 'looP');
1226 if (!Descriptor)
1227 {
1228 //
1229 // This is really bad...
1230 //
1231 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1232 0,
1233 -1,
1234 -1,
1235 -1);
1236 }
1237
1238 //
1239 // Setup the vector and guarded mutex for paged pool
1240 //
1241 PoolVector[PagedPool] = Descriptor;
1242 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
1243 ExpPagedPoolDescriptor[0] = Descriptor;
1244 KeInitializeGuardedMutex(ExpPagedPoolMutex);
1245 ExInitializePoolDescriptor(Descriptor,
1246 PagedPool,
1247 0,
1248 Threshold,
1249 ExpPagedPoolMutex);
1250
1251 //
1252 // Insert the generic tracker for all of nonpaged pool
1253 //
1254 ExpInsertPoolTracker('looP',
1255 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1256 NonPagedPool);
1257 }
1258 }
1259
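/*
 * Worked example (illustrative): the bit scans above accept only a
 * power-of-two registry size. A requested size of 256 leaves its single
 * set bit at position 8, giving max(1 << 8, 64) = 256 entries; anything
 * smaller is rounded up to the 64-entry minimum, and i reaching 32 means
 * no valid size was given, selecting the 2048/4096 defaults instead.
 */
#if 0
static VOID ExpDemoTrackTableSizing(VOID)
{
    SIZE_T TableSize = 256; /* hypothetical registry-provided size */
    ULONG i;

    for (i = 0; i < 32; i++)
    {
        if (TableSize & 1) break;
        TableSize >>= 1;
    }

    ASSERT(i == 8);
    ASSERT(max(1 << i, 64) == 256);
}
#endif
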
1260 FORCEINLINE
1261 KIRQL
1262 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1263 {
1264 //
1265 // Check if this is nonpaged pool
1266 //
1267 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1268 {
1269 //
1270 // Use the queued spin lock
1271 //
1272 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1273 }
1274 else
1275 {
1276 //
1277 // Use the guarded mutex
1278 //
1279 KeAcquireGuardedMutex(Descriptor->LockAddress);
1280 return APC_LEVEL;
1281 }
1282 }
1283
1284 FORCEINLINE
1285 VOID
1286 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1287 IN KIRQL OldIrql)
1288 {
1289 //
1290 // Check if this is nonpaged pool
1291 //
1292 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1293 {
1294 //
1295 // Use the queued spin lock
1296 //
1297 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1298 }
1299 else
1300 {
1301 //
1302 // Use the guarded mutex
1303 //
1304 KeReleaseGuardedMutex(Descriptor->LockAddress);
1305 }
1306 }
1307
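/*
 * Usage sketch (illustrative): callers bracket free-list manipulation
 * with this pair and must hand back the IRQL that ExLockPool returned,
 * since nonpaged pool raises to DISPATCH_LEVEL through the queued
 * spinlock while paged pool stays at APC_LEVEL under the guarded mutex.
 */
#if 0
static VOID ExpDemoPoolLocking(PPOOL_DESCRIPTOR Descriptor)
{
    KIRQL OldIrql = ExLockPool(Descriptor);

    /* ...safely touch Descriptor->ListHeads here... */

    ExUnlockPool(Descriptor, OldIrql);
}
#endif
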
1308 VOID
1309 NTAPI
1310 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1311 IN PVOID DeferredContext,
1312 IN PVOID SystemArgument1,
1313 IN PVOID SystemArgument2)
1314 {
1315 PPOOL_DPC_CONTEXT Context = DeferredContext;
1316 UNREFERENCED_PARAMETER(Dpc);
1317 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1318
1319 //
1320 // Make sure we win the race, and if we did, copy the data atomically
1321 //
1322 if (KeSignalCallDpcSynchronize(SystemArgument2))
1323 {
1324 RtlCopyMemory(Context->PoolTrackTable,
1325 PoolTrackTable,
1326 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1327
1328 //
1329 // This is here because ReactOS does not yet support expansion
1330 //
1331 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1332 }
1333
1334 //
1335 // Regardless of whether we won or not, we must now synchronize and then
1336 // decrement the barrier since this is one more processor that has completed
1337 // the callback.
1338 //
1339 KeSignalCallDpcSynchronize(SystemArgument2);
1340 KeSignalCallDpcDone(SystemArgument1);
1341 }
1342
1343 NTSTATUS
1344 NTAPI
1345 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1346 IN ULONG SystemInformationLength,
1347 IN OUT PULONG ReturnLength OPTIONAL)
1348 {
1349 ULONG TableSize, CurrentLength;
1350 ULONG EntryCount;
1351 NTSTATUS Status = STATUS_SUCCESS;
1352 PSYSTEM_POOLTAG TagEntry;
1353 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1354 POOL_DPC_CONTEXT Context;
1355 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1356
1357 //
1358 // Keep track of how much data the caller's buffer must hold
1359 //
1360 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1361
1362 //
1363 // Initialize the caller's buffer
1364 //
1365 TagEntry = &SystemInformation->TagInfo[0];
1366 SystemInformation->Count = 0;
1367
1368 //
1369 // Capture the number of entries, and the total size needed to make a copy
1370 // of the table
1371 //
1372 EntryCount = (ULONG)PoolTrackTableSize;
1373 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1374
1375 //
1376 // Allocate the "Generic DPC" temporary buffer
1377 //
1378 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1379 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1380
1381 //
1382 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1383 //
1384 Context.PoolTrackTable = Buffer;
1385 Context.PoolTrackTableSize = PoolTrackTableSize;
1386 Context.PoolTrackTableExpansion = NULL;
1387 Context.PoolTrackTableSizeExpansion = 0;
1388 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1389
1390 //
1391 // Now parse the results
1392 //
1393 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1394 {
1395 //
1396 // If the entry is empty, skip it
1397 //
1398 if (!TrackerEntry->Key) continue;
1399
1400 //
1401 // Otherwise, add one more entry to the caller's buffer, and ensure that
1402 // enough space has been allocated in it
1403 //
1404 SystemInformation->Count++;
1405 CurrentLength += sizeof(*TagEntry);
1406 if (SystemInformationLength < CurrentLength)
1407 {
1408 //
1409 // The caller's buffer is too small, so set a failure code. The
1410 // caller will know the count, as well as how much space is needed.
1411 //
1412 // We do NOT break out of the loop, because we want to keep incrementing
1413 // the Count as well as CurrentLength so that the caller can know the
1414 // final numbers
1415 //
1416 Status = STATUS_INFO_LENGTH_MISMATCH;
1417 }
1418 else
1419 {
1420 //
1421 // Small sanity check that our accounting is working correctly
1422 //
1423 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1424 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1425
1426 //
1427 // Return the data into the caller's buffer
1428 //
1429 TagEntry->TagUlong = TrackerEntry->Key;
1430 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1431 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1432 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1433 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1434 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1435 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1436 TagEntry++;
1437 }
1438 }
1439
1440 //
1441 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1442 //
1443 ExFreePoolWithTag(Buffer, 'ofnI');
1444 if (ReturnLength) *ReturnLength = CurrentLength;
1445 return Status;
1446 }
1447
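/*
 * Caller sketch (illustrative): because the loop above keeps counting
 * past an overflow, a caller can size its buffer in two passes by
 * retrying with the returned length. The 'tseT' tag and the routine
 * below are hypothetical.
 */
#if 0
static NTSTATUS ExpDemoQueryPoolTags(VOID)
{
    ULONG Length = PAGE_SIZE;
    PSYSTEM_POOLTAG_INFORMATION Info;
    NTSTATUS Status;

    do
    {
        Info = ExAllocatePoolWithTag(PagedPool, Length, 'tseT');
        if (!Info) return STATUS_INSUFFICIENT_RESOURCES;

        Status = ExGetPoolTagInfo(Info, Length, &Length);
        if (Status == STATUS_INFO_LENGTH_MISMATCH) ExFreePoolWithTag(Info, 'tseT');
    } while (Status == STATUS_INFO_LENGTH_MISMATCH);

    /* ...consume Info->TagInfo[0 .. Info->Count - 1] here... */
    if (NT_SUCCESS(Status)) ExFreePoolWithTag(Info, 'tseT');
    return Status;
}
#endif
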
1448 _IRQL_requires_(DISPATCH_LEVEL)
1449 BOOLEAN
1450 NTAPI
1451 ExpExpandBigPageTable(
1452 _In_ _IRQL_restores_ KIRQL OldIrql)
1453 {
1454 ULONG OldSize = PoolBigPageTableSize;
1455 ULONG NewSize = 2 * OldSize;
1456 ULONG NewSizeInBytes;
1457 PPOOL_TRACKER_BIG_PAGES NewTable;
1458 PPOOL_TRACKER_BIG_PAGES OldTable;
1459 ULONG i;
1460 ULONG PagesFreed;
1461 ULONG Hash;
1462 ULONG HashMask;
1463
1464 /* Must be holding ExpLargePoolTableLock */
1465 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1466
1467 /* Make sure we don't overflow */
1468 if (!NT_SUCCESS(RtlULongMult(2,
1469 OldSize * sizeof(POOL_TRACKER_BIG_PAGES),
1470 &NewSizeInBytes)))
1471 {
1472 DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
1473 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1474 return FALSE;
1475 }
1476
1477 NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
1478 if (NewTable == NULL)
1479 {
1480 DPRINT1("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
1481 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1482 return FALSE;
1483 }
1484
1485 DPRINT("Expanding big pool tracker table to %lu entries\n", NewSize);
1486
1487 /* Initialize the new table */
1488 RtlZeroMemory(NewTable, NewSizeInBytes);
1489 for (i = 0; i < NewSize; i++)
1490 {
1491 NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
1492 }
1493
1494 /* Copy over all items */
1495 OldTable = PoolBigPageTable;
1496 HashMask = NewSize - 1;
1497 for (i = 0; i < OldSize; i++)
1498 {
1499 /* Skip over empty items */
1500 if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
1501 {
1502 continue;
1503 }
1504
1505 /* Recalculate the hash due to the new table size */
1506 Hash = ExpComputePartialHashForAddress(OldTable[i].Va) & HashMask;
1507
1508 /* Find the location in the new table */
1509 while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
1510 {
1511 Hash = (Hash + 1) & HashMask;
1512 }
1513
1514 /* We just enlarged the table, so we must have space */
1515 ASSERT((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE);
1516
1517 /* Finally, copy the item */
1518 NewTable[Hash] = OldTable[i];
1519 }
1520
1521 /* Activate the new table */
1522 PoolBigPageTable = NewTable;
1523 PoolBigPageTableSize = NewSize;
1524 PoolBigPageTableHash = PoolBigPageTableSize - 1;
1525
1526 /* Release the lock, we're done changing global state */
1527 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1528
1529 /* Free the old table and update our tracker */
1530 PagesFreed = MiFreePoolPages(OldTable);
1531 ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0);
1532 ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0);
1533
1534 return TRUE;
1535 }
1536
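/*
 * Worked example (illustrative): doubling from 4096 to 8192 buckets
 * changes the hash mask from 0xFFF to 0x1FFF, so an entry whose partial
 * hash is 0x1234 moves from bucket 0x234 to bucket 0x1234; that is why
 * every live entry is rehashed into the new table above.
 */
#if 0
static VOID ExpDemoRehashOnExpansion(VOID)
{
    ULONG PartialHash = 0x1234; /* hypothetical partial address hash */

    ASSERT((PartialHash & (4096 - 1)) == 0x234);
    ASSERT((PartialHash & (8192 - 1)) == 0x1234);
}
#endif
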
1537 BOOLEAN
1538 NTAPI
1539 ExpAddTagForBigPages(IN PVOID Va,
1540 IN ULONG Key,
1541 IN ULONG NumberOfPages,
1542 IN POOL_TYPE PoolType)
1543 {
1544 ULONG Hash, i = 0;
1545 PVOID OldVa;
1546 KIRQL OldIrql;
1547 SIZE_T TableSize;
1548 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1549 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1550 ASSERT(!(PoolType & SESSION_POOL_MASK));
1551
1552 //
1553 // As the table is expandable, these values must only be read after acquiring
1554 // the lock to avoid a torn access during an expansion
1555 // NOTE: Windows uses a special reader/writer SpinLock to improve
1556 // performance in the common case (add/remove a tracker entry)
1557 //
1558 Retry:
1559 Hash = ExpComputePartialHashForAddress(Va);
1560 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1561 Hash &= PoolBigPageTableHash;
1562 TableSize = PoolBigPageTableSize;
1563
1564 //
1565 // We loop from the current hash bucket to the end of the table, and then
1566 // rollover to hash bucket 0 and keep going from there. If we return back
1567 // to the beginning, then we attempt expansion at the bottom of the loop
1568 //
1569 EntryStart = Entry = &PoolBigPageTable[Hash];
1570 EntryEnd = &PoolBigPageTable[TableSize];
1571 do
1572 {
1573 //
1574 // Make sure that this is a free entry and attempt to atomically make the
1575 // entry busy now
1576 // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
1577 //
1578 OldVa = Entry->Va;
1579 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1580 (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
1581 {
1582 //
1583 // We now own this entry, write down the size and the pool tag
1584 //
1585 Entry->Key = Key;
1586 Entry->NumberOfPages = NumberOfPages;
1587
1588 //
1589 // Add one more entry to the count, and see if the table is now more
1590 // than 25% full, at which point we'll do an expansion now
1591 // to avoid blocking too hard later on.
1592 //
1593 // Note that we only do this if we have also had to probe at least 16
1594 // buckets before finding a free one or winning the race,
1595 // which implies a massive number of concurrent big pool allocations.
1596 //
1597 InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1598 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1599 {
1600 DPRINT("Attempting expansion since we now have %lu entries\n",
1601 ExpPoolBigEntriesInUse);
1602 ASSERT(TableSize == PoolBigPageTableSize);
1603 ExpExpandBigPageTable(OldIrql);
1604 return TRUE;
1605 }
1606
1607 //
1608 // We have our entry, return
1609 //
1610 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1611 return TRUE;
1612 }
1613
1614 //
1615 // We don't have our entry yet, so keep trying, making the entry list
1616 // circular if we reach the last entry. We'll eventually break out of
1617 // the loop once we've rolled over and returned back to our original
1618 // hash bucket
1619 //
1620 i++;
1621 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1622 } while (Entry != EntryStart);
1623
1624 //
1625 // This means there's no free hash buckets whatsoever, so we now have
1626 // to attempt expanding the table
1627 //
1628 ASSERT(TableSize == PoolBigPageTableSize);
1629 if (ExpExpandBigPageTable(OldIrql))
1630 {
1631 goto Retry;
1632 }
1633 ExpBigTableExpansionFailed++;
1634 DPRINT1("Big pool table expansion failed\n");
1635 return FALSE;
1636 }
1637
1638 ULONG
1639 NTAPI
1640 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1641 OUT PULONG_PTR BigPages,
1642 IN POOL_TYPE PoolType)
1643 {
1644 BOOLEAN FirstTry = TRUE;
1645 SIZE_T TableSize;
1646 KIRQL OldIrql;
1647 ULONG PoolTag, Hash;
1648 PPOOL_TRACKER_BIG_PAGES Entry;
1649 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1650 ASSERT(!(PoolType & SESSION_POOL_MASK));
1651
1652 //
1653 // As the table is expandable, these values must only be read after acquiring
1654 // the lock to avoid a torn access during an expansion
1655 //
1656 Hash = ExpComputePartialHashForAddress(Va);
1657 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1658 Hash &= PoolBigPageTableHash;
1659 TableSize = PoolBigPageTableSize;
1660
1661 //
1662 // Loop while trying to find this big page allocation
1663 //
1664 while (PoolBigPageTable[Hash].Va != Va)
1665 {
1666 //
1667 // Increment the size until we go past the end of the table
1668 //
1669 if (++Hash >= TableSize)
1670 {
1671 //
1672 // Is this the second time we've tried?
1673 //
1674 if (!FirstTry)
1675 {
1676 //
1677 // This means it was never inserted into the pool table and it
1678 // received the special "BIG" tag -- return that and return 0
1679 // so that the code can ask Mm for the page count instead
1680 //
1681 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1682 *BigPages = 0;
1683 return ' GIB';
1684 }
1685
1686 //
1687 // The first time this happens, reset the hash index and try again
1688 //
1689 Hash = 0;
1690 FirstTry = FALSE;
1691 }
1692 }
1693
1694 //
1695 // Now capture all the information we need from the entry, since after we
1696 // release the lock, the data can change
1697 //
1698 Entry = &PoolBigPageTable[Hash];
1699 *BigPages = Entry->NumberOfPages;
1700 PoolTag = Entry->Key;
1701
1702 //
1703 // Set the free bit, and decrement the number of allocations. Finally, release
1704 // the lock and return the tag that was located
1705 //
1706 InterlockedIncrement((PLONG)&Entry->Va);
1707 InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1708 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1709 return PoolTag;
1710 }
1711
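/*
 * Illustrative note: big-pool VAs are page aligned, so bit 0 of the Va
 * field doubles as POOL_BIG_TABLE_ENTRY_FREE; the InterlockedIncrement
 * above simply sets that bit (aligned address + 1), marking the bucket
 * free while keeping the stale address visible for debugging. DemoVa is
 * a hypothetical page-aligned address.
 */
#if 0
static VOID ExpDemoBigEntryFreeBit(VOID)
{
    PVOID DemoVa = (PVOID)0x80001000;
    ULONG_PTR Freed = (ULONG_PTR)DemoVa + 1;

    ASSERT(((ULONG_PTR)DemoVa & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(Freed & POOL_BIG_TABLE_ENTRY_FREE);
}
#endif
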
1712 VOID
1713 NTAPI
1714 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1715 OUT PULONG NonPagedPoolPages,
1716 OUT PULONG PagedPoolAllocs,
1717 OUT PULONG PagedPoolFrees,
1718 OUT PULONG PagedPoolLookasideHits,
1719 OUT PULONG NonPagedPoolAllocs,
1720 OUT PULONG NonPagedPoolFrees,
1721 OUT PULONG NonPagedPoolLookasideHits)
1722 {
1723 ULONG i;
1724 PPOOL_DESCRIPTOR PoolDesc;
1725
1726 //
1727 // Assume all failures
1728 //
1729 *PagedPoolPages = 0;
1730 *PagedPoolAllocs = 0;
1731 *PagedPoolFrees = 0;
1732
1733 //
1734 // Tally up the totals for all the paged pools
1735 //
1736 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1737 {
1738 PoolDesc = ExpPagedPoolDescriptor[i];
1739 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1740 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1741 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1742 }
1743
1744 //
1745 // The first non-paged pool has a hardcoded well-known descriptor name
1746 //
1747 PoolDesc = &NonPagedPoolDescriptor;
1748 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1749 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1750 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1751
1752 //
1753 // If the system has more than one non-paged pool, copy the other descriptor
1754 // totals as well
1755 //
1756 #if 0
1757 if (ExpNumberOfNonPagedPools > 1)
1758 {
1759 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1760 {
1761 PoolDesc = ExpNonPagedPoolDescriptor[i];
1762 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1763 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1764 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1765 }
1766 }
1767 #endif
1768
1769 //
1770 // Get the number of hits in the system lookaside lists
1771 //
1772 if (!IsListEmpty(&ExPoolLookasideListHead))
1773 {
1774 PLIST_ENTRY ListEntry;
1775
1776 for (ListEntry = ExPoolLookasideListHead.Flink;
1777 ListEntry != &ExPoolLookasideListHead;
1778 ListEntry = ListEntry->Flink)
1779 {
1780 PGENERAL_LOOKASIDE Lookaside;
1781
1782 Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);
1783
1784 if (Lookaside->Type == NonPagedPool)
1785 {
1786 *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
1787 }
1788 else
1789 {
1790 *PagedPoolLookasideHits += Lookaside->AllocateHits;
1791 }
1792 }
1793 }
1794 }
1795
1796 VOID
1797 NTAPI
1798 ExReturnPoolQuota(IN PVOID P)
1799 {
1800 PPOOL_HEADER Entry;
1801 POOL_TYPE PoolType;
1802 USHORT BlockSize;
1803 PEPROCESS Process;
1804
1805 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
1806 (MmIsSpecialPoolAddress(P)))
1807 {
1808 return;
1809 }
1810
1811 Entry = P;
1812 Entry--;
1813 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
1814
1815 PoolType = Entry->PoolType - 1;
1816 BlockSize = Entry->BlockSize;
1817
1818 if (PoolType & QUOTA_POOL_MASK)
1819 {
1820 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1821 ASSERT(Process != NULL);
1822 if (Process)
1823 {
1824 if (Process->Pcb.Header.Type != ProcessObject)
1825 {
1826 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1827 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1828 KeBugCheckEx(BAD_POOL_CALLER,
1829 0x0D,
1830 (ULONG_PTR)P,
1831 Entry->PoolTag,
1832 (ULONG_PTR)Process);
1833 }
1834 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1835 PsReturnPoolQuota(Process,
1836 PoolType & BASE_POOL_TYPE_MASK,
1837 BlockSize * POOL_BLOCK_SIZE);
1838 ObDereferenceObject(Process);
1839 }
1840 }
1841 }
1842
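/*
 * Layout sketch (illustrative): quota-charged small-pool blocks stash
 * the owning EPROCESS pointer in the last pointer-sized slot of the
 * block, which is why it is read back (and cleared) above through
 * ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] before the quota is returned.
 */
#if 0
static PEPROCESS *ExpDemoQuotaProcessSlot(PPOOL_HEADER Entry)
{
    /* The slot sits immediately before the start of the next block */
    return &((PEPROCESS *)POOL_NEXT_BLOCK(Entry))[-1];
}
#endif
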
1843 /* PUBLIC FUNCTIONS ***********************************************************/
1844
1845 /*
1846 * @implemented
1847 */
1848 PVOID
1849 NTAPI
1850 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1851 IN SIZE_T NumberOfBytes,
1852 IN ULONG Tag)
1853 {
1854 PPOOL_DESCRIPTOR PoolDesc;
1855 PLIST_ENTRY ListHead;
1856 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1857 KIRQL OldIrql;
1858 USHORT BlockSize, i;
1859 ULONG OriginalType;
1860 PKPRCB Prcb = KeGetCurrentPrcb();
1861 PGENERAL_LOOKASIDE LookasideList;
1862
1863 //
1864 // Some sanity checks
1865 //
1866 ASSERT(Tag != 0);
1867 ASSERT(Tag != ' GIB');
1868 ASSERT(NumberOfBytes != 0);
1869 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1870
1871 //
1872 // Not supported in ReactOS
1873 //
1874 ASSERT(!(PoolType & SESSION_POOL_MASK));
1875
1876 //
1877 // Check if verifier or special pool is enabled
1878 //
1879 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1880 {
1881 //
1882 // For verifier, we should call the verification routine
1883 //
1884 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1885 {
1886 DPRINT1("Driver Verifier is not yet supported\n");
1887 }
1888
1889 //
1890 // For special pool, we check if this is a suitable allocation and do
1891 // the special allocation if needed
1892 //
1893 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1894 {
1895 //
1896 // Check if this is a special pool allocation
1897 //
1898 if (MmUseSpecialPool(NumberOfBytes, Tag))
1899 {
1900 //
1901 // Try to allocate using special pool
1902 //
1903 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1904 if (Entry) return Entry;
1905 }
1906 }
1907 }
1908
1909 //
1910 // Get the pool type and its corresponding vector for this request
1911 //
1912 OriginalType = PoolType;
1913 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1914 PoolDesc = PoolVector[PoolType];
1915 ASSERT(PoolDesc != NULL);
1916
1917 //
1918 // Check if this is a big page allocation
1919 //
1920 if (NumberOfBytes > POOL_MAX_ALLOC)
1921 {
1922 //
1923 // Allocate pages for it
1924 //
1925 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1926 if (!Entry)
1927 {
1928 #if DBG
1929 //
1930 // Out of memory, display current consumption
1931 // Treat any request for more than a hundred
1932 // pages as a bogus caller rather than a real
1933 // out-of-memory condition. Dump at most
1934 // once a second to avoid spamming the log.
1935 //
1936 if (NumberOfBytes < 100 * PAGE_SIZE &&
1937 KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
1938 {
1939 MiDumpPoolConsumers(FALSE, 0, 0, 0);
1940 MiLastPoolDumpTime = KeQueryInterruptTime();
1941 }
1942 #endif
1943
1944 //
1945 // Must succeed pool is deprecated, but still supported. These allocation
1946 // failures must cause an immediate bugcheck
1947 //
1948 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1949 {
1950 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1951 NumberOfBytes,
1952 NonPagedPoolDescriptor.TotalPages,
1953 NonPagedPoolDescriptor.TotalBigPages,
1954 0);
1955 }
1956
1957 //
1958 // Internal debugging
1959 //
1960 ExPoolFailures++;
1961
1962 //
1963 // This flag requests printing failures, and can also further specify
1964 // breaking on failures
1965 //
1966 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1967 {
1968 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1969 NumberOfBytes,
1970 OriginalType);
1971 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1972 }
1973
1974 //
1975 // Finally, this flag requests an exception, which we are more than
1976 // happy to raise!
1977 //
1978 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1979 {
1980 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1981 }
1982
1983 return NULL;
1984 }
1985
1986 //
1987 // Increment required counters
1988 //
1989 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1990 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1991 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1992 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1993
1994 //
1995 // Add a tag for the big page allocation and switch to the generic "BIG"
1996 // tag if we failed to do so, then insert a tracker for this allocation.
1997 //
1998 if (!ExpAddTagForBigPages(Entry,
1999 Tag,
2000 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
2001 OriginalType))
2002 {
2003 Tag = ' GIB';
2004 }
2005 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
2006 return Entry;
2007 }
2008
2009 //
2010 // Should never request 0 bytes from the pool, but since so many drivers do
2011 // it, we'll just assume they want 1 byte, based on NT's similar behavior
2012 //
2013 if (!NumberOfBytes) NumberOfBytes = 1;
2014
2015 //
2016 // A pool allocation is defined by its data, a linked list to connect it to
2017 // the free list (if necessary), and a pool header to store accounting info.
2018 // Calculate this size, then convert it into a block size (units of pool
2019 // headers)
2020 //
2021 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
2022 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
2023 // the direct allocation of pages.
2024 //
2025 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
2026 / POOL_BLOCK_SIZE);
2027 ASSERT(i < POOL_LISTS_PER_PAGE);
2028
2029 //
2030 // Handle lookaside list optimization for both paged and nonpaged pool
2031 //
2032 if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
2033 {
2034 //
2035 // Try popping it from the per-CPU lookaside list
2036 //
2037 LookasideList = (PoolType == PagedPool) ?
2038 Prcb->PPPagedLookasideList[i - 1].P :
2039 Prcb->PPNPagedLookasideList[i - 1].P;
2040 LookasideList->TotalAllocates++;
2041 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
2042 if (!Entry)
2043 {
2044 //
2045 // We failed, try popping it from the global list
2046 //
2047 LookasideList = (PoolType == PagedPool) ?
2048 Prcb->PPPagedLookasideList[i - 1].L :
2049 Prcb->PPNPagedLookasideList[i - 1].L;
2050 LookasideList->TotalAllocates++;
2051 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
2052 }
2053
2054 //
2055 // If we were able to pop it, update the accounting and return the block
2056 //
2057 if (Entry)
2058 {
2059 LookasideList->AllocateHits++;
2060
2061 //
2062 // Get the real entry, write down its pool type, and track it
2063 //
2064 Entry--;
2065 Entry->PoolType = OriginalType + 1;
2066 ExpInsertPoolTracker(Tag,
2067 Entry->BlockSize * POOL_BLOCK_SIZE,
2068 OriginalType);
2069
2070 //
2071 // Return the pool allocation
2072 //
2073 Entry->PoolTag = Tag;
2074 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2075 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2076 return POOL_FREE_BLOCK(Entry);
2077 }
2078 }
2079
2080 //
2081 // Loop through the free lists looking for a block of this size. Start with
2082 // the list optimized for lookups of this size
2083 //
2084 ListHead = &PoolDesc->ListHeads[i];
2085 do
2086 {
2087 //
2088 // Are there any free entries available on this list?
2089 //
2090 if (!ExpIsPoolListEmpty(ListHead))
2091 {
2092 //
2093 // Acquire the pool lock now
2094 //
2095 OldIrql = ExLockPool(PoolDesc);
2096
2097 //
2098 // And make sure the list still has entries
2099 //
2100 if (ExpIsPoolListEmpty(ListHead))
2101 {
2102 //
2103 // Someone raced us (and won) before we had a chance to acquire
2104 // the lock.
2105 //
2106 // Try again!
2107 //
2108 ExUnlockPool(PoolDesc, OldIrql);
2109 continue;
2110 }
2111
2112 //
2113 // Remove a free entry from the list
2114 // Note that, due to the way we insert free blocks into multiple lists,
2115 // any block on this list is guaranteed to be either of the correct
2116 // size, or larger.
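// (For example, a request for i == 4 blocks that lands on a 12-block
// free entry is fine: 4 blocks go to the caller and the 8-block
// remainder is split off and reinserted below.)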
2117 //
2118 ExpCheckPoolLinks(ListHead);
2119 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
2120 ExpCheckPoolLinks(ListHead);
2121 ExpCheckPoolBlocks(Entry);
2122 ASSERT(Entry->BlockSize >= i);
2123 ASSERT(Entry->PoolType == 0);
2124
2125 //
2126 // Check if this block is larger than what we need. The block could
2127 // not possibly be smaller, due to the reason explained above (and
2128 // we would've asserted on a checked build if this was the case).
2129 //
2130 if (Entry->BlockSize != i)
2131 {
2132 //
2133 // Is there an entry before this one?
2134 //
2135 if (Entry->PreviousSize == 0)
2136 {
2137 //
2138 // There isn't anyone before us, so take the next block and
2139 // turn it into a fragment that contains the leftover data
2140 // that we don't need to satisfy the caller's request
2141 //
2142 FragmentEntry = POOL_BLOCK(Entry, i);
2143 FragmentEntry->BlockSize = Entry->BlockSize - i;
2144
2145 //
2146 // And make it point back to us
2147 //
2148 FragmentEntry->PreviousSize = i;
2149
2150 //
2151 // Now get the block that follows the new fragment and check
2152 // if it's still on the same page as us (and not at the end)
2153 //
2154 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2155 if (PAGE_ALIGN(NextEntry) != NextEntry)
2156 {
2157 //
2158 // Adjust this next block to point to our newly created
2159 // fragment block
2160 //
2161 NextEntry->PreviousSize = FragmentEntry->BlockSize;
2162 }
2163 }
2164 else
2165 {
2166 //
2167 // There is a free entry before us, which we know is smaller
2168 // so we'll make this entry the fragment instead
2169 //
2170 FragmentEntry = Entry;
2171
2172 //
2173 // And then we'll remove from it the actual size required.
2174 // Now the entry is a leftover free fragment
2175 //
2176 Entry->BlockSize -= i;
2177
2178 //
2179 // Now let's go to the next entry after the fragment (which
2180 // used to point to our original free entry) and make it
2181 // reference the new fragment entry instead.
2182 //
2183 // This is the entry that will actually end up holding the
2184 // allocation!
2185 //
2186 Entry = POOL_NEXT_BLOCK(Entry);
2187 Entry->PreviousSize = FragmentEntry->BlockSize;
2188
2189 //
2190 // And now let's go to the entry after that one and check if
2191 // it's still on the same page, and not at the end
2192 //
2193 NextEntry = POOL_BLOCK(Entry, i);
2194 if (PAGE_ALIGN(NextEntry) != NextEntry)
2195 {
2196 //
2197 // Make it reference the allocation entry
2198 //
2199 NextEntry->PreviousSize = i;
2200 }
2201 }
2202
2203 //
2204 // Now our (allocation) entry is the right size
2205 //
2206 Entry->BlockSize = i;
2207
2208 //
2209 // And the next entry is now the free fragment which contains
2210 // the remaining difference between how big the original entry
2211 // was, and the actual size the caller needs/requested.
2212 //
2213 FragmentEntry->PoolType = 0;
2214 BlockSize = FragmentEntry->BlockSize;
2215
2216 //
2217 // Now check if enough free bytes remained for us to have a
2218 // "full" entry, which contains enough bytes for a linked list
2219 // and thus can be used for allocations (up to 8 bytes...)
2220 //
2221 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2222 if (BlockSize != 1)
2223 {
2224 //
2225 // Insert the free entry into the free list for this size
2226 //
2227 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2228 POOL_FREE_BLOCK(FragmentEntry));
2229 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2230 }
2231 }
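//
// To illustrate the two split cases above (sizes in block units):
//
//   PreviousSize == 0 -- the fragment is carved off the tail:
//     before: [Entry, 12 blocks, free...........................]
//     after:  [Entry, 4 blocks][FragmentEntry, 8 blocks, free...]
//
//   PreviousSize != 0 -- the fragment keeps the head:
//     before: [Entry, 12 blocks, free...........................]
//     after:  [FragmentEntry, 8 blocks, free...][Entry, 4 blocks]
//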
2232
2233 //
2234 // We have found an entry for this allocation, so set the pool type
2235 // and release the lock since we're done
2236 //
2237 Entry->PoolType = OriginalType + 1;
2238 ExpCheckPoolBlocks(Entry);
2239 ExUnlockPool(PoolDesc, OldIrql);
2240
2241 //
2242 // Increment required counters
2243 //
2244 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2245 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2246
2247 //
2248 // Track this allocation
2249 //
2250 ExpInsertPoolTracker(Tag,
2251 Entry->BlockSize * POOL_BLOCK_SIZE,
2252 OriginalType);
2253
2254 //
2255 // Return the pool allocation
2256 //
2257 Entry->PoolTag = Tag;
2258 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2259 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2260 return POOL_FREE_BLOCK(Entry);
2261 }
2262 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2263
2264 //
2265 // There were no free entries left, so we have to allocate a new fresh page
2266 //
2267 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2268 if (!Entry)
2269 {
2270 #if DBG
2271 //
2272 // Out of memory -- display the current pool consumption.
2273 // Treat a request for more than a hundred pages as a
2274 // bogus caller rather than a genuine out-of-memory
2275 // condition. Dump at most once a second to avoid
2276 // spamming the log.
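// (KeQueryInterruptTime counts in 100 ns units, so the 10000000-tick
// threshold below is exactly one second.)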
2277 //
2278 if (NumberOfBytes < 100 * PAGE_SIZE &&
2279 KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
2280 {
2281 MiDumpPoolConsumers(FALSE, 0, 0, 0);
2282 MiLastPoolDumpTime = KeQueryInterruptTime();
2283 }
2284 #endif
2285
2286 //
2287 // Must-succeed pool is deprecated, but still supported. An allocation
2288 // failure there must cause an immediate bugcheck
2289 //
2290 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2291 {
2292 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2293 PAGE_SIZE,
2294 NonPagedPoolDescriptor.TotalPages,
2295 NonPagedPoolDescriptor.TotalBigPages,
2296 0);
2297 }
2298
2299 //
2300 // Internal debugging
2301 //
2302 ExPoolFailures++;
2303
2304 //
2305 // This flag requests printing failures, and can also further specify
2306 // breaking on failures
2307 //
2308 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2309 {
2310 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2311 NumberOfBytes,
2312 OriginalType);
2313 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2314 }
2315
2316 //
2317 // Finally, this flag requests an exception, which we are more than
2318 // happy to raise!
2319 //
2320 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2321 {
2322 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2323 }
2324
2325 //
2326 // Return NULL to the caller in all other cases
2327 //
2328 return NULL;
2329 }
2330
2331 //
2332 // Setup the entry data
2333 //
2334 Entry->Ulong1 = 0;
2335 Entry->BlockSize = i;
2336 Entry->PoolType = OriginalType + 1;
2337
2338 //
2339 // This page will have two entries -- one for the allocation (which we just
2340 // created above), and one for the remaining free bytes, which we're about
2341 // to create now. The free bytes are the whole page minus what was allocated
2342 // and then converted into units of block headers.
2343 //
2344 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2345 FragmentEntry = POOL_BLOCK(Entry, i);
2346 FragmentEntry->Ulong1 = 0;
2347 FragmentEntry->BlockSize = BlockSize;
2348 FragmentEntry->PreviousSize = i;
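//
// (For example, with 4KB pages and 8-byte blocks on x86, a page holds
// 512 blocks, so an i == 4 allocation leaves a 508-block free fragment.)
//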
2349
2350 //
2351 // Increment required counters
2352 //
2353 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2354 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2355
2356 //
2357 // Now check if enough free bytes remained for us to have a "full" entry,
2358 // which contains enough bytes for a linked list and thus can be used for
2359 // allocations (up to 8 bytes...)
2360 //
2361 if (FragmentEntry->BlockSize != 1)
2362 {
2363 //
2364 // Excellent -- acquire the pool lock
2365 //
2366 OldIrql = ExLockPool(PoolDesc);
2367
2368 //
2369 // And insert the free entry into the free list for this block size
2370 //
2371 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2372 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2373 POOL_FREE_BLOCK(FragmentEntry));
2374 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2375
2376 //
2377 // Release the pool lock
2378 //
2379 ExpCheckPoolBlocks(Entry);
2380 ExUnlockPool(PoolDesc, OldIrql);
2381 }
2382 else
2383 {
2384 //
2385 // Simply do a sanity check
2386 //
2387 ExpCheckPoolBlocks(Entry);
2388 }
2389
2390 //
2391 // Increment performance counters and track this allocation
2392 //
2393 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2394 ExpInsertPoolTracker(Tag,
2395 Entry->BlockSize * POOL_BLOCK_SIZE,
2396 OriginalType);
2397
2398 //
2399 // And return the pool allocation
2400 //
2401 ExpCheckPoolBlocks(Entry);
2402 Entry->PoolTag = Tag;
2403 return POOL_FREE_BLOCK(Entry);
2404 }
2405
2406 /*
2407 * @implemented
2408 */
2409 PVOID
2410 NTAPI
2411 ExAllocatePool(POOL_TYPE PoolType,
2412 SIZE_T NumberOfBytes)
2413 {
2414 ULONG Tag = TAG_NONE;
2415 #if 0 && DBG
2416 PLDR_DATA_TABLE_ENTRY LdrEntry;
2417
2418 /* Use the first four letters of the driver name, or "None" if unavailable */
2419 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2420 ? MiLookupDataTableEntry(_ReturnAddress())
2421 : NULL;
2422 if (LdrEntry)
2423 {
2424 ULONG i;
2425 Tag = 0;
2426 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2427 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2428 for (; i < 4; i++)
2429 Tag = Tag >> 8 | ' ' << 24;
2430 }
2431 #endif
2432 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2433 }
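
//
// A rough caller-side sketch (hypothetical tag and size), showing the
// usual pairing of a tagged allocation with ExFreePoolWithTag:
//
//     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 128, 'pmxE');
//     if (Buffer != NULL)
//     {
//         RtlZeroMemory(Buffer, 128);
//         /* ... use the buffer ... */
//         ExFreePoolWithTag(Buffer, 'pmxE');
//     }
//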
2434
2435 /*
2436 * @implemented
2437 */
2438 VOID
2439 NTAPI
2440 ExFreePoolWithTag(IN PVOID P,
2441 IN ULONG TagToFree)
2442 {
2443 PPOOL_HEADER Entry, NextEntry;
2444 USHORT BlockSize;
2445 KIRQL OldIrql;
2446 POOL_TYPE PoolType;
2447 PPOOL_DESCRIPTOR PoolDesc;
2448 ULONG Tag;
2449 BOOLEAN Combined = FALSE;
2450 PFN_NUMBER PageCount, RealPageCount;
2451 PKPRCB Prcb = KeGetCurrentPrcb();
2452 PGENERAL_LOOKASIDE LookasideList;
2453 PEPROCESS Process;
2454
2455 //
2456 // Check if any of the debug flags are enabled
2457 //
2458 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2459 POOL_FLAG_CHECK_WORKERS |
2460 POOL_FLAG_CHECK_RESOURCES |
2461 POOL_FLAG_VERIFIER |
2462 POOL_FLAG_CHECK_DEADLOCK |
2463 POOL_FLAG_SPECIAL_POOL))
2464 {
2465 //
2466 // Check if special pool is enabled
2467 //
2468 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2469 {
2470 //
2471 // Check if it was allocated from a special pool
2472 //
2473 if (MmIsSpecialPoolAddress(P))
2474 {
2475 //
2476 // Was deadlock verification also enabled? We can do some extra
2477 // checks at this point
2478 //
2479 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2480 {
2481 DPRINT1("Verifier not yet supported\n");
2482 }
2483
2484 //
2485 // It is, so handle it via special pool free routine
2486 //
2487 MmFreeSpecialPool(P);
2488 return;
2489 }
2490 }
2491
2492 //
2493 // For non-big page allocations, we'll do a bunch of checks in here
2494 //
2495 if (PAGE_ALIGN(P) != P)
2496 {
2497 //
2498 // Get the entry for this pool allocation
2499 // The pointer math here may look wrong or confusing, but it is quite right
2500 //
2501 Entry = P;
2502 Entry--;
2503
2504 //
2505 // Get the pool type
2506 //
2507 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2508
2509 //
2510 // FIXME: Many other debugging checks go here
2511 //
2512 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2513 }
2514 }
2515
2516 //
2517 // Check if this is a big page allocation
2518 //
2519 if (PAGE_ALIGN(P) == P)
2520 {
2521 //
2522 // We need to find the tag for it, so first we need to find out what
2523 // kind of allocation this was (paged or nonpaged), then we can go
2524 // ahead and try finding the tag for it. Remember to get rid of the
2525 // PROTECTED_POOL tag if it's found.
2526 //
2527 // Note that if at insertion time, we failed to add the tag for a big
2528 // pool allocation, we used a special tag called 'BIG' to identify the
2529 // allocation, and we may get this tag back. In this scenario, we must
2530 // manually get the size of the allocation by actually counting through
2531 // the PFN database.
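//
// (Page alignment alone identifies big allocations: MiAllocatePoolPages
// hands out whole, page-aligned pages, while small-block allocations
// are always offset into a page by at least one POOL_HEADER.)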
2532 //
2533 PoolType = MmDeterminePoolType(P);
2534 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2535 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2536 if (!Tag)
2537 {
2538 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2539 ASSERT(Tag == ' GIB');
2540 PageCount = 1; // We are going to lie! This might screw up accounting?
2541 }
2542 else if (Tag & PROTECTED_POOL)
2543 {
2544 Tag &= ~PROTECTED_POOL;
2545 }
2546
2547 //
2548 // Check block tag
2549 //
2550 if (TagToFree && TagToFree != Tag)
2551 {
2552 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2553 #if DBG
2554 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2555 #endif
2556 }
2557
2558 //
2559 // We have our tag and our page count, so we can go ahead and remove this
2560 // tracker now
2561 //
2562 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2563
2564 //
2565 // Check if any of the debug flags are enabled
2566 //
2567 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2568 POOL_FLAG_CHECK_WORKERS |
2569 POOL_FLAG_CHECK_RESOURCES |
2570 POOL_FLAG_CHECK_DEADLOCK))
2571 {
2572 //
2573 // Was deadlock verification also enabled? We can do some extra
2574 // checks at this point
2575 //
2576 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2577 {
2578 DPRINT1("Verifier not yet supported\n");
2579 }
2580
2581 //
2582 // FIXME: Many debugging checks go here
2583 //
2584 }
2585
2586 //
2587 // Update counters
2588 //
2589 PoolDesc = PoolVector[PoolType];
2590 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2591 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2592 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2593
2594 //
2595 // Do the real free now and update the last counter with the big page count
2596 //
2597 RealPageCount = MiFreePoolPages(P);
2598 ASSERT(RealPageCount == PageCount);
2599 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2600 -(LONG)RealPageCount);
2601 return;
2602 }
2603
2604 //
2605 // Get the entry for this pool allocation
2606 // The pointer math here may look wrong or confusing, but it is quite right
2607 //
2608 Entry = P;
2609 Entry--;
2610 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
2611
2612 //
2613 // Get the size of the entry and its pool type, then load the descriptor
2614 // for this pool type
2615 //
2616 BlockSize = Entry->BlockSize;
2617 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2618 PoolDesc = PoolVector[PoolType];
2619
2620 //
2621 // Make sure that the IRQL makes sense
2622 //
2623 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2624
2625 //
2626 // Get the pool tag and get rid of the PROTECTED_POOL flag
2627 //
2628 Tag = Entry->PoolTag;
2629 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2630
2631 //
2632 // Check block tag
2633 //
2634 if (TagToFree && TagToFree != Tag)
2635 {
2636 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2637 #if DBG
2638 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2639 #endif
2640 }
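//
// (For example, a block allocated with tag 'pmxE' but freed with a
// mismatched tag trips the DPRINT1 above and, on DBG builds, bugchecks
// with BAD_POOL_CALLER, subcode 0x0A.)
//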
2641
2642 //
2643 // Track the removal of this allocation
2644 //
2645 ExpRemovePoolTracker(Tag,
2646 BlockSize * POOL_BLOCK_SIZE,
2647 Entry->PoolType - 1);
2648
2649 //
2650 // Release pool quota, if any
2651 //
2652 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2653 {
2654 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2655 if (Process)
2656 {
2657 if (Process->Pcb.Header.Type != ProcessObject)
2658 {
2659 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2660 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2661 KeBugCheckEx(BAD_POOL_CALLER,
2662 0x0D,
2663 (ULONG_PTR)P,
2664 Tag,
2665 (ULONG_PTR)Process);
2666 }
2667 PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2668 ObDereferenceObject(Process);
2669 }
2670 }
2671
2672 //
2673 // Is this allocation small enough to have come from a lookaside list?
2674 //
2675 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2676 {
2677 //
2678 // Try pushing it into the per-CPU lookaside list
2679 //
2680 LookasideList = (PoolType == PagedPool) ?
2681 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2682 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2683 LookasideList->TotalFrees++;
2684 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2685 {
2686 LookasideList->FreeHits++;
2687 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2688 return;
2689 }
2690
2691 //
2692 // We failed, try to push it into the global lookaside list
2693 //
2694 LookasideList = (PoolType == PagedPool) ?
2695 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2696 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2697 LookasideList->TotalFrees++;
2698 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2699 {
2700 LookasideList->FreeHits++;
2701 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2702 return;
2703 }
2704 }
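//
// (Both the per-CPU and global lookaside lists were already at their
// Depth limit, so the block falls through to the regular free path
// below, where it can also be coalesced with neighboring free blocks.)
//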
2705
2706 //
2707 // Get the pointer to the next entry
2708 //
2709 NextEntry = POOL_BLOCK(Entry, BlockSize);
2710
2711 //
2712 // Update performance counters
2713 //
2714 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2715 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2716
2717 //
2718 // Acquire the pool lock
2719 //
2720 OldIrql = ExLockPool(PoolDesc);
2721
2722 //
2723 // Check if the next allocation is at the end of the page
2724 //
2725 ExpCheckPoolBlocks(Entry);
2726 if (PAGE_ALIGN(NextEntry) != NextEntry)
2727 {
2728 //
2729 // We may be able to combine the block if it's free
2730 //
2731 if (NextEntry->PoolType == 0)
2732 {
2733 //
2734 // The next block is free, so we'll do a combine
2735 //
2736 Combined = TRUE;
2737
2738 //
2739 // Make sure there's actual data in the block -- anything smaller
2740 // than this means we only have the header, so there's no linked list
2741 // for us to remove
2742 //
2743 if (NextEntry->BlockSize != 1)
2744 {
2745 //
2746 // The block is at least big enough to have a linked list, so go
2747 // ahead and remove it
2748 //
2749 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2750 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2751 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2752 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2753 }
2754
2755 //
2756 // Our entry is now combined with the next entry
2757 //
2758 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2759 }
2760 }
2761
2762 //
2763 // Now check if there was a previous entry on the same page as us
2764 //
2765 if (Entry->PreviousSize)
2766 {
2767 //
2768 // Great, grab that entry and check if it's free
2769 //
2770 NextEntry = POOL_PREV_BLOCK(Entry);
2771 if (NextEntry->PoolType == 0)
2772 {
2773 //
2774 // It is, so we can do a combine
2775 //
2776 Combined = TRUE;
2777
2778 //
2779 // Make sure there's actual data in the block -- anything smaller
2780 // than this means we only have the header so there's no linked list
2781 // for us to remove
2782 //
2783 if (NextEntry->BlockSize != 1)
2784 {
2785 //
2786 // The block is at least big enough to have a linked list, so go
2787 // ahead and remove it
2788 //
2789 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2790 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2791 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2792 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2793 }
2794
2795 //
2796 // Combine our original block (which might've already been combined
2797 // with the next block) into the previous block
2798 //
2799 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2800
2801 //
2802 // And now we'll work with the previous block instead
2803 //
2804 Entry = NextEntry;
2805 }
2806 }
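//
// To illustrate the coalescing above (sizes in block units):
//
//     before: [prev, 3, free][Entry, 4, being freed][next, 5, free]
//     after:  [Entry == old prev, 12 blocks, free.................]
//
// The forward merge absorbed "next" into Entry; the backward merge then
// absorbed Entry into "prev", which became the new Entry.
//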
2807
2808 //
2809 // By now, it may have been possible for our combined blocks to actually
2810 // have made up a full page (if there were only 2-3 allocations on the
2811 // page, they could've all been combined).
2812 //
2813 if ((PAGE_ALIGN(Entry) == Entry) &&
2814 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2815 {
2816 //
2817 // In this case, release the pool lock, update the performance counter,
2818 // and free the page
2819 //
2820 ExUnlockPool(PoolDesc, OldIrql);
2821 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2822 MiFreePoolPages(Entry);
2823 return;
2824 }
2825
2826 //
2827 // Otherwise, we now have a free block (or a combination of 2 or 3)
2828 //
2829 Entry->PoolType = 0;
2830 BlockSize = Entry->BlockSize;
2831 ASSERT(BlockSize != 1);
2832
2833 //
2834 // Check if we actually did combine it with anyone
2835 //
2836 if (Combined)
2837 {
2838 //
2839 // Get the first combined block (either our original to begin with, or
2840 // the one after the original, depending if we combined with the previous)
2841 //
2842 NextEntry = POOL_NEXT_BLOCK(Entry);
2843
2844 //
2845 // As long as the next block isn't on a page boundary, have it point
2846 // back to us
2847 //
2848 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2849 }
2850
2851 //
2852 // Insert this new free block, and release the pool lock
2853 //
2854 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2855 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2856 ExUnlockPool(PoolDesc, OldIrql);
2857 }
2858
2859 /*
2860 * @implemented
2861 */
2862 VOID
2863 NTAPI
2864 ExFreePool(PVOID P)
2865 {
2866 //
2867 // Just free without checking for the tag
2868 //
2869 ExFreePoolWithTag(P, 0);
2870 }
2871
2872 /*
2873 * @unimplemented
2874 */
2875 SIZE_T
2876 NTAPI
2877 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2878 OUT PBOOLEAN QuotaCharged)
2879 {
2880 //
2881 // Not implemented
2882 //
2883 UNIMPLEMENTED;
2884 return 0;
2885 }
2886
2887 /*
2888 * @implemented
2889 */
2891 PVOID
2892 NTAPI
2893 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2894 IN SIZE_T NumberOfBytes)
2895 {
2896 //
2897 // Allocate the pool
2898 //
2899 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2900 }
2901
2902 /*
2903 * @implemented
2904 */
2905 PVOID
2906 NTAPI
2907 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2908 IN SIZE_T NumberOfBytes,
2909 IN ULONG Tag,
2910 IN EX_POOL_PRIORITY Priority)
2911 {
2912 PVOID Buffer;
2913
2914 //
2915 // Allocate the pool
2916 //
2917 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2918 if (Buffer == NULL)
2919 {
2920 UNIMPLEMENTED;
2921 }
2922
2923 return Buffer;
2924 }
2925
2926 /*
2927 * @implemented
2928 */
2929 PVOID
2930 NTAPI
2931 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2932 IN SIZE_T NumberOfBytes,
2933 IN ULONG Tag)
2934 {
2935 BOOLEAN Raise = TRUE;
2936 PVOID Buffer;
2937 PPOOL_HEADER Entry;
2938 NTSTATUS Status;
2939 PEPROCESS Process = PsGetCurrentProcess();
2940
2941 //
2942 // Check if we should fail instead of raising an exception
2943 //
2944 if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2945 {
2946 Raise = FALSE;
2947 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2948 }
2949
2950 //
2951 // Inject the pool quota mask
2952 //
2953 PoolType += QUOTA_POOL_MASK;
2954
2955 //
2956 // Check if we have enough space to add the quota owner process, as long as
2957 // this isn't the system process, which never gets charged quota
2958 //
2959 ASSERT(NumberOfBytes != 0);
2960 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2961 (Process != PsInitialSystemProcess))
2962 {
2963 //
2964 // Add space for our EPROCESS pointer
2965 //
2966 NumberOfBytes += sizeof(PEPROCESS);
2967 }
2968 else
2969 {
2970 //
2971 // We won't be able to store the pointer, so don't use quota for this
2972 //
2973 PoolType -= QUOTA_POOL_MASK;
2974 }
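//
// At this point a quota-charged block is laid out so that its last
// pointer-sized slot can hold the owning process (stored below):
//
//     [POOL_HEADER][caller data ..................][PEPROCESS owner]
//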
2975
2976 //
2977 // Allocate the pool buffer now
2978 //
2979 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2980
2981 //
2982 // If the buffer is page-aligned, this is a large page allocation and we
2983 // won't touch it
2984 //
2985 if (PAGE_ALIGN(Buffer) != Buffer)
2986 {
2987 //
2988 // Also if special pool is enabled, and this was allocated from there,
2989 // we won't touch it either
2990 //
2991 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
2992 (MmIsSpecialPoolAddress(Buffer)))
2993 {
2994 return Buffer;
2995 }
2996
2997 //
2998 // If it wasn't actually allocated with quota charges, ignore it too
2999 //
3000 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
3001
3002 //
3003 // If this is the system process, we don't charge quota, so ignore
3004 //
3005 if (Process == PsInitialSystemProcess) return Buffer;
3006
3007 //
3008 // Actually go and charge quota for the process now
3009 //
3010 Entry = POOL_ENTRY(Buffer);
3011 Status = PsChargeProcessPoolQuota(Process,
3012 PoolType & BASE_POOL_TYPE_MASK,
3013 Entry->BlockSize * POOL_BLOCK_SIZE);
3014 if (!NT_SUCCESS(Status))
3015 {
3016 //
3017 // Quota failed, back out the allocation, clear the owner, and fail
3018 //
3019 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
3020 ExFreePoolWithTag(Buffer, Tag);
3021 if (Raise) RtlRaiseStatus(Status);
3022 return NULL;
3023 }
3024
3025 //
3026 // Quota worked, write the owner and then reference it before returning
3027 //
3028 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
3029 ObReferenceObject(Process);
3030 }
3031 else if (!(Buffer) && (Raise))
3032 {
3033 //
3034 // The allocation failed, raise an error if we are in raise mode
3035 //
3036 RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
3037 }
3038
3039 //
3040 // Return the allocated buffer
3041 //
3042 return Buffer;
3043 }
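
//
// A rough caller-side sketch (hypothetical tag and size): quota is
// charged to the current process, and POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
// turns a quota or allocation failure into a NULL return instead of a
// raised exception:
//
//     PVOID Buffer = ExAllocatePoolWithQuotaTag(PagedPool |
//                        POOL_QUOTA_FAIL_INSTEAD_OF_RAISE,
//                        0x100,
//                        'pmxE');
//     if (Buffer == NULL) return STATUS_INSUFFICIENT_RESOURCES;
//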
3044
3045 /* EOF */