/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PVOID PoolTrackTable;
PKGUARDED_MUTEX ExpPagedPoolMutex;

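/*
 * Note that PoolVector is indexed by the base POOL_TYPE value
 * (NonPagedPool == 0, PagedPool == 1), so PoolVector[PoolType &
 * BASE_POOL_TYPE_MASK] yields the descriptor serving a given request.
 */
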
/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))

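/*
 * Note that BlockSize and PreviousSize are expressed in units of
 * POOL_BLOCK_SIZE (i.e. in pool headers), not in bytes, and that each size
 * includes the block's own POOL_HEADER. POOL_BLOCK(x, i) therefore steps
 * i whole pool blocks forward from header x.
 */
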
/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
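
/*
 * The encoding simply sets bit 0 of the pointer, which is otherwise always
 * clear because pool blocks (and thus the LIST_ENTRYs stored inside free
 * blocks) are aligned to at least POOL_BLOCK_SIZE. For example, a free block
 * whose list entry sits at 0x81234560 is stored on the list as 0x81234561
 * and masked back on use. Because every link is encoded, these lists must
 * only be manipulated through the Exp*PoolList routines below, never through
 * the raw LIST_ENTRY macros.
 */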

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}

VOID
NTAPI
ExpCheckPoolBlocks(IN PVOID Block)
{
    BOOLEAN FoundBlock = FALSE;
    SIZE_T Size = 0;
    PPOOL_HEADER Entry;

    /* Get the first entry for this page, make sure it really is the first */
    Entry = PAGE_ALIGN(Block);
    ASSERT(Entry->PreviousSize == 0);

    /* Now scan each entry */
    while (TRUE)
    {
        /* When we actually found our block, remember this */
        if (Entry == Block) FoundBlock = TRUE;

        /* Now validate this block header */
        ExpCheckPoolHeader(Entry);

        /* And go to the next one, keeping track of our size */
        Size += Entry->BlockSize;
        Entry = POOL_NEXT_BLOCK(Entry);

        /* If we hit the last block, stop */
        if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;

        /* If we hit the end of the page, stop */
        if (PAGE_ALIGN(Entry) == Entry) break;
    }

    /* We must've found our block, and we must have hit the end of the page */
    if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
    {
        /* Otherwise, the blocks are messed up */
        KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
    }
}
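
/*
 * Taken together, the two checks above enforce the page-local invariants of
 * the small-block pool: each block's PreviousSize matches its neighbor's
 * BlockSize, a block chain never crosses a page boundary, and walking the
 * BlockSize links from the start of a page lands exactly on the next page
 * boundary.
 */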

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
INIT_FUNCTION
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }
}

VOID
NTAPI
INIT_FUNCTION
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);
    }
}
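
//
// N.B. The paged pool path above allocates its descriptor and guarded mutex
// from nonpaged pool, so InitializePool(NonPagedPool, ...) must already have
// run; the two calls are expected to happen in that order during kernel
// initialization.
//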

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
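
//
// N.B. For nonpaged pool, the queued spinlock raises to DISPATCH_LEVEL and
// the returned IRQL is what must be restored on release. For paged pool, the
// guarded mutex does not raise IRQL; the APC_LEVEL return value is only a
// placeholder, and ExUnlockPool ignores OldIrql on that path.
//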

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);

    //
    // Get the pool type and its corresponding vector for this request
    //
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Then just hand out whole pages for it directly
        //
        return MiAllocatePoolPages(PoolType, NumberOfBytes);
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot exceed POOL_LISTS_PER_PAGE, because any request that
    // large would've been above POOL_MAX_ALLOC and already satisfied by the
    // direct page allocation above.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);
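
    //
    // Worked example (illustrative numbers for x86, where POOL_BLOCK_SIZE and
    // sizeof(POOL_HEADER) are both 8): a 24-byte request yields
    // i = (24 + 8 + 7) / 8 = 4, i.e. a 32-byte block -- 8 bytes of header
    // plus 24 bytes of data, rounded up to whole pool blocks.
    //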

    //
    // Loop in the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                ListHead++;
                continue;
            }

            //
            // Remove a free entry from the list.
            // Note that due to the way free blocks are inserted into these
            // lists, any block on this list is guaranteed to be either of the
            // correct size, or larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = PoolType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
    ASSERT(Entry != NULL);
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = PoolType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}

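//
// Usage sketch (illustrative only, not part of this file): callers pair the
// allocation with a free using the same tag. 'tseT' is a made-up example tag.
//
#if 0
    PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 128, 'tseT');
    if (Buffer)
    {
        /* ... use the 128-byte buffer ... */
        ExFreePoolWithTag(Buffer, 'tseT');
    }
#endif
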
/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    //
    // Use a default tag of "None"
    //
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, TAG_NONE);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    USHORT BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    BOOLEAN Combined = FALSE;

    //
    // Quickly deal with big page allocations
    //
    if (PAGE_ALIGN(P) == P)
    {
        MiFreePoolPages(P);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry, and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check block tag
    //
    if (TagToFree && TagToFree != Entry->PoolTag)
    {
        DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n",
                (char*)&TagToFree, (char*)&Entry->PoolTag);
        KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, TagToFree);
    }

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

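    //
    // Worked example of the coalescing above (illustrative): if the page is
    // laid out as [A free][B being freed][C free], B first absorbs C, then A
    // absorbs the combined B+C, and the code continues with the merged block.
    //
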
    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}

/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return 0;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // The Priority hint is not honored yet -- fall back to a regular
    // tagged allocation
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    //
    // Quota is not charged yet -- fall back to a regular tagged allocation
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/* EOF */