/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
9 /* INCLUDES *******************************************************************/
#line 15 "ARM³::EXPOOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
19 #undef ExAllocatePoolWithQuota
20 #undef ExAllocatePoolWithQuotaTag
22 BOOLEAN AllowPagedPool
= TRUE
;
24 /* GLOBALS ********************************************************************/
26 ULONG ExpNumberOfPagedPools
;
27 POOL_DESCRIPTOR NonPagedPoolDescriptor
;
28 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor
[16 + 1];
29 PPOOL_DESCRIPTOR PoolVector
[2];
31 PKGUARDED_MUTEX ExpPagedPoolMutex
;
/* Pool block/header/list access macros */

/* Data pointer -> its POOL_HEADER (the header sits immediately before the data) */
#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
/* POOL_HEADER -> the LIST_ENTRY stored in its (free) data area */
#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
/* Header located i pool blocks (POOL_BLOCK_SIZE units) after header x */
#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
/* Next/previous block on the page, using the sizes recorded in the header */
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
58 ExpDecodePoolLink(IN PLIST_ENTRY Link
)
60 return (PLIST_ENTRY
)((ULONG_PTR
)Link
& ~1);
65 ExpEncodePoolLink(IN PLIST_ENTRY Link
)
67 return (PLIST_ENTRY
)((ULONG_PTR
)Link
| 1);
72 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead
)
74 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Flink
)->Blink
) != ListHead
) ||
75 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Blink
)->Flink
) != ListHead
))
77 KeBugCheckEx(BAD_POOL_HEADER
,
80 (ULONG_PTR
)ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Flink
)->Blink
),
81 (ULONG_PTR
)ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Blink
)->Flink
));
87 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead
)
89 ListHead
->Flink
= ListHead
->Blink
= ExpEncodePoolLink(ListHead
);
94 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead
)
96 return (ExpDecodePoolLink(ListHead
->Flink
) == ListHead
);
101 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry
)
103 PLIST_ENTRY Blink
, Flink
;
104 Flink
= ExpDecodePoolLink(Entry
->Flink
);
105 Blink
= ExpDecodePoolLink(Entry
->Blink
);
106 Flink
->Blink
= ExpEncodePoolLink(Blink
);
107 Blink
->Flink
= ExpEncodePoolLink(Flink
);
112 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead
)
114 PLIST_ENTRY Entry
, Flink
;
115 Entry
= ExpDecodePoolLink(ListHead
->Flink
);
116 Flink
= ExpDecodePoolLink(Entry
->Flink
);
117 ListHead
->Flink
= ExpEncodePoolLink(Flink
);
118 Flink
->Blink
= ExpEncodePoolLink(ListHead
);
124 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead
)
126 PLIST_ENTRY Entry
, Blink
;
127 Entry
= ExpDecodePoolLink(ListHead
->Blink
);
128 Blink
= ExpDecodePoolLink(Entry
->Blink
);
129 ListHead
->Blink
= ExpEncodePoolLink(Blink
);
130 Blink
->Flink
= ExpEncodePoolLink(ListHead
);
136 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead
,
137 IN PLIST_ENTRY Entry
)
140 ExpCheckPoolLinks(ListHead
);
141 Blink
= ExpDecodePoolLink(ListHead
->Blink
);
142 Entry
->Flink
= ExpEncodePoolLink(ListHead
);
143 Entry
->Blink
= ExpEncodePoolLink(Blink
);
144 Blink
->Flink
= ExpEncodePoolLink(Entry
);
145 ListHead
->Blink
= ExpEncodePoolLink(Entry
);
146 ExpCheckPoolLinks(ListHead
);
151 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead
,
152 IN PLIST_ENTRY Entry
)
155 ExpCheckPoolLinks(ListHead
);
156 Flink
= ExpDecodePoolLink(ListHead
->Flink
);
157 Entry
->Flink
= ExpEncodePoolLink(Flink
);
158 Entry
->Blink
= ExpEncodePoolLink(ListHead
);
159 Flink
->Blink
= ExpEncodePoolLink(Entry
);
160 ListHead
->Flink
= ExpEncodePoolLink(Entry
);
161 ExpCheckPoolLinks(ListHead
);
166 ExpCheckPoolHeader(IN PPOOL_HEADER Entry
)
168 PPOOL_HEADER PreviousEntry
, NextEntry
;
170 /* Is there a block before this one? */
171 if (Entry
->PreviousSize
)
174 PreviousEntry
= POOL_PREV_BLOCK(Entry
);
176 /* The two blocks must be on the same page! */
177 if (PAGE_ALIGN(Entry
) != PAGE_ALIGN(PreviousEntry
))
179 /* Something is awry */
180 KeBugCheckEx(BAD_POOL_HEADER
,
182 (ULONG_PTR
)PreviousEntry
,
187 /* This block should also indicate that it's as large as we think it is */
188 if (PreviousEntry
->BlockSize
!= Entry
->PreviousSize
)
190 /* Otherwise, someone corrupted one of the sizes */
191 KeBugCheckEx(BAD_POOL_HEADER
,
193 (ULONG_PTR
)PreviousEntry
,
198 else if (PAGE_ALIGN(Entry
) != Entry
)
200 /* If there's no block before us, we are the first block, so we should be on a page boundary */
201 KeBugCheckEx(BAD_POOL_HEADER
,
208 /* This block must have a size */
209 if (!Entry
->BlockSize
)
211 /* Someone must've corrupted this field */
212 KeBugCheckEx(BAD_POOL_HEADER
,
219 /* Okay, now get the next block */
220 NextEntry
= POOL_NEXT_BLOCK(Entry
);
222 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
223 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
225 /* The two blocks must be on the same page! */
226 if (PAGE_ALIGN(Entry
) != PAGE_ALIGN(NextEntry
))
228 /* Something is messed up */
229 KeBugCheckEx(BAD_POOL_HEADER
,
231 (ULONG_PTR
)NextEntry
,
236 /* And this block should think we are as large as we truly are */
237 if (NextEntry
->PreviousSize
!= Entry
->BlockSize
)
239 /* Otherwise, someone corrupted the field */
240 KeBugCheckEx(BAD_POOL_HEADER
,
242 (ULONG_PTR
)NextEntry
,
251 ExpCheckPoolBlocks(IN PVOID Block
)
257 /* Get the first entry for this page, make sure it really is the first */
258 Entry
= PAGE_ALIGN(Block
);
259 ASSERT(Entry
->PreviousSize
== 0);
261 /* Now scan each entry */
264 /* When we actually found our block, remember this */
265 if (Entry
== Block
) FoundBlock
= TRUE
;
267 /* Now validate this block header */
268 ExpCheckPoolHeader(Entry
);
270 /* And go to the next one, keeping track of our size */
271 Size
+= Entry
->BlockSize
;
272 Entry
= POOL_NEXT_BLOCK(Entry
);
274 /* If we hit the last block, stop */
275 if (Size
>= (PAGE_SIZE
/ POOL_BLOCK_SIZE
)) break;
277 /* If we hit the end of the page, stop */
278 if (PAGE_ALIGN(Entry
) == Entry
) break;
281 /* We must've found our block, and we must have hit the end of the page */
282 if ((PAGE_ALIGN(Entry
) != Entry
) || !(FoundBlock
))
284 /* Otherwise, the blocks are messed up */
285 KeBugCheckEx(BAD_POOL_HEADER
, 10, (ULONG_PTR
)Block
, __LINE__
, (ULONG_PTR
)Entry
);
289 /* PRIVATE FUNCTIONS **********************************************************/
293 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor
,
294 IN POOL_TYPE PoolType
,
299 PLIST_ENTRY NextEntry
, LastEntry
;
302 // Setup the descriptor based on the caller's request
304 PoolDescriptor
->PoolType
= PoolType
;
305 PoolDescriptor
->PoolIndex
= PoolIndex
;
306 PoolDescriptor
->Threshold
= Threshold
;
307 PoolDescriptor
->LockAddress
= PoolLock
;
310 // Initialize accounting data
312 PoolDescriptor
->RunningAllocs
= 0;
313 PoolDescriptor
->RunningDeAllocs
= 0;
314 PoolDescriptor
->TotalPages
= 0;
315 PoolDescriptor
->TotalBytes
= 0;
316 PoolDescriptor
->TotalBigPages
= 0;
319 // Nothing pending for now
321 PoolDescriptor
->PendingFrees
= NULL
;
322 PoolDescriptor
->PendingFreeDepth
= 0;
325 // Loop all the descriptor's allocation lists and initialize them
327 NextEntry
= PoolDescriptor
->ListHeads
;
328 LastEntry
= NextEntry
+ POOL_LISTS_PER_PAGE
;
329 while (NextEntry
< LastEntry
)
331 ExpInitializePoolListHead(NextEntry
);
338 InitializePool(IN POOL_TYPE PoolType
,
341 PPOOL_DESCRIPTOR Descriptor
;
344 // Check what kind of pool this is
346 if (PoolType
== NonPagedPool
)
349 // Initialize the nonpaged pool descriptor
351 PoolVector
[NonPagedPool
] = &NonPagedPoolDescriptor
;
352 ExInitializePoolDescriptor(PoolVector
[NonPagedPool
],
361 // Allocate the pool descriptor
363 Descriptor
= ExAllocatePoolWithTag(NonPagedPool
,
364 sizeof(KGUARDED_MUTEX
) +
365 sizeof(POOL_DESCRIPTOR
),
370 // This is really bad...
372 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
380 // Setup the vector and guarded mutex for paged pool
382 PoolVector
[PagedPool
] = Descriptor
;
383 ExpPagedPoolMutex
= (PKGUARDED_MUTEX
)(Descriptor
+ 1);
384 KeInitializeGuardedMutex(ExpPagedPoolMutex
);
385 ExInitializePoolDescriptor(Descriptor
,
395 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor
)
398 // Check if this is nonpaged pool
400 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
403 // Use the queued spin lock
405 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock
);
410 // Use the guarded mutex
412 KeAcquireGuardedMutex(Descriptor
->LockAddress
);
419 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor
,
423 // Check if this is nonpaged pool
425 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
428 // Use the queued spin lock
430 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
435 // Use the guarded mutex
437 KeReleaseGuardedMutex(Descriptor
->LockAddress
);
441 /* PUBLIC FUNCTIONS ***********************************************************/
448 ExAllocatePoolWithTag(IN POOL_TYPE PoolType
,
449 IN SIZE_T NumberOfBytes
,
452 PPOOL_DESCRIPTOR PoolDesc
;
453 PLIST_ENTRY ListHead
;
454 PPOOL_HEADER Entry
, NextEntry
, FragmentEntry
;
459 // Check for paged pool
461 if (!(AllowPagedPool
) && (PoolType
== PagedPool
)) return ExAllocatePagedPoolWithTag(PagedPool
, NumberOfBytes
, Tag
);
464 // Some sanity checks
467 ASSERT(Tag
!= ' GIB');
468 ASSERT(NumberOfBytes
!= 0);
471 // Get the pool type and its corresponding vector for this request
473 PoolType
= PoolType
& BASE_POOL_TYPE_MASK
;
474 PoolDesc
= PoolVector
[PoolType
];
475 ASSERT(PoolDesc
!= NULL
);
478 // Check if this is a big page allocation
480 if (NumberOfBytes
> POOL_MAX_ALLOC
)
483 // Then just return the number of pages requested
485 return MiAllocatePoolPages(PoolType
, NumberOfBytes
);
489 // Should never request 0 bytes from the pool, but since so many drivers do
490 // it, we'll just assume they want 1 byte, based on NT's similar behavior
492 if (!NumberOfBytes
) NumberOfBytes
= 1;
495 // A pool allocation is defined by its data, a linked list to connect it to
496 // the free list (if necessary), and a pool header to store accounting info.
497 // Calculate this size, then convert it into a block size (units of pool
500 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
501 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
502 // the direct allocation of pages.
504 i
= (NumberOfBytes
+ sizeof(POOL_HEADER
) + (POOL_BLOCK_SIZE
- 1)) / POOL_BLOCK_SIZE
;
507 // Loop in the free lists looking for a block if this size. Start with the
508 // list optimized for this kind of size lookup
510 ListHead
= &PoolDesc
->ListHeads
[i
];
514 // Are there any free entries available on this list?
516 if (!ExpIsPoolListEmpty(ListHead
))
519 // Acquire the pool lock now
521 OldIrql
= ExLockPool(PoolDesc
);
524 // And make sure the list still has entries
526 if (ExpIsPoolListEmpty(ListHead
))
529 // Someone raced us (and won) before we had a chance to acquire
534 ExUnlockPool(PoolDesc
, OldIrql
);
540 // Remove a free entry from the list
541 // Note that due to the way we insert free blocks into multiple lists
542 // there is a guarantee that any block on this list will either be
543 // of the correct size, or perhaps larger.
545 ExpCheckPoolLinks(ListHead
);
546 Entry
= POOL_ENTRY(ExpRemovePoolHeadList(ListHead
));
547 ExpCheckPoolLinks(ListHead
);
548 ExpCheckPoolBlocks(Entry
);
549 ASSERT(Entry
->BlockSize
>= i
);
550 ASSERT(Entry
->PoolType
== 0);
553 // Check if this block is larger that what we need. The block could
554 // not possibly be smaller, due to the reason explained above (and
555 // we would've asserted on a checked build if this was the case).
557 if (Entry
->BlockSize
!= i
)
560 // Is there an entry before this one?
562 if (Entry
->PreviousSize
== 0)
565 // There isn't anyone before us, so take the next block and
566 // turn it into a fragment that contains the leftover data
567 // that we don't need to satisfy the caller's request
569 FragmentEntry
= POOL_BLOCK(Entry
, i
);
570 FragmentEntry
->BlockSize
= Entry
->BlockSize
- i
;
573 // And make it point back to us
575 FragmentEntry
->PreviousSize
= i
;
578 // Now get the block that follows the new fragment and check
579 // if it's still on the same page as us (and not at the end)
581 NextEntry
= POOL_NEXT_BLOCK(FragmentEntry
);
582 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
585 // Adjust this next block to point to our newly created
588 NextEntry
->PreviousSize
= FragmentEntry
->BlockSize
;
594 // There is a free entry before us, which we know is smaller
595 // so we'll make this entry the fragment instead
597 FragmentEntry
= Entry
;
600 // And then we'll remove from it the actual size required.
601 // Now the entry is a leftover free fragment
603 Entry
->BlockSize
-= i
;
606 // Now let's go to the next entry after the fragment (which
607 // used to point to our original free entry) and make it
608 // reference the new fragment entry instead.
610 // This is the entry that will actually end up holding the
613 Entry
= POOL_NEXT_BLOCK(Entry
);
614 Entry
->PreviousSize
= FragmentEntry
->BlockSize
;
617 // And now let's go to the entry after that one and check if
618 // it's still on the same page, and not at the end
620 NextEntry
= POOL_BLOCK(Entry
, i
);
621 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
624 // Make it reference the allocation entry
626 NextEntry
->PreviousSize
= i
;
631 // Now our (allocation) entry is the right size
633 Entry
->BlockSize
= i
;
636 // And the next entry is now the free fragment which contains
637 // the remaining difference between how big the original entry
638 // was, and the actual size the caller needs/requested.
640 FragmentEntry
->PoolType
= 0;
641 BlockSize
= FragmentEntry
->BlockSize
;
644 // Now check if enough free bytes remained for us to have a
645 // "full" entry, which contains enough bytes for a linked list
646 // and thus can be used for allocations (up to 8 bytes...)
648 ExpCheckPoolLinks(&PoolDesc
->ListHeads
[BlockSize
- 1]);
652 // Insert the free entry into the free list for this size
654 ExpInsertPoolTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
655 POOL_FREE_BLOCK(FragmentEntry
));
656 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry
));
661 // We have found an entry for this allocation, so set the pool type
662 // and release the lock since we're done
664 Entry
->PoolType
= PoolType
+ 1;
665 ExpCheckPoolBlocks(Entry
);
666 ExUnlockPool(PoolDesc
, OldIrql
);
669 // Return the pool allocation
671 Entry
->PoolTag
= Tag
;
672 (POOL_FREE_BLOCK(Entry
))->Flink
= NULL
;
673 (POOL_FREE_BLOCK(Entry
))->Blink
= NULL
;
674 return POOL_FREE_BLOCK(Entry
);
676 } while (++ListHead
!= &PoolDesc
->ListHeads
[POOL_LISTS_PER_PAGE
]);
679 // There were no free entries left, so we have to allocate a new fresh page
681 Entry
= MiAllocatePoolPages(PoolType
, PAGE_SIZE
);
682 ASSERT(Entry
!= NULL
);
684 Entry
->BlockSize
= i
;
685 Entry
->PoolType
= PoolType
+ 1;
688 // This page will have two entries -- one for the allocation (which we just
689 // created above), and one for the remaining free bytes, which we're about
690 // to create now. The free bytes are the whole page minus what was allocated
691 // and then converted into units of block headers.
693 BlockSize
= (PAGE_SIZE
/ POOL_BLOCK_SIZE
) - i
;
694 FragmentEntry
= POOL_BLOCK(Entry
, i
);
695 FragmentEntry
->Ulong1
= 0;
696 FragmentEntry
->BlockSize
= BlockSize
;
697 FragmentEntry
->PreviousSize
= i
;
700 // Now check if enough free bytes remained for us to have a "full" entry,
701 // which contains enough bytes for a linked list and thus can be used for
702 // allocations (up to 8 bytes...)
704 if (FragmentEntry
->BlockSize
!= 1)
707 // Excellent -- acquire the pool lock
709 OldIrql
= ExLockPool(PoolDesc
);
712 // And insert the free entry into the free list for this block size
714 ExpCheckPoolLinks(&PoolDesc
->ListHeads
[BlockSize
- 1]);
715 ExpInsertPoolTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
716 POOL_FREE_BLOCK(FragmentEntry
));
717 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry
));
720 // Release the pool lock
722 ExpCheckPoolBlocks(Entry
);
723 ExUnlockPool(PoolDesc
, OldIrql
);
727 // And return the pool allocation
729 ExpCheckPoolBlocks(Entry
);
730 Entry
->PoolTag
= Tag
;
731 return POOL_FREE_BLOCK(Entry
);
739 ExAllocatePool(POOL_TYPE PoolType
,
740 SIZE_T NumberOfBytes
)
743 // Use a default tag of "None"
745 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, 'enoN');
753 ExFreePoolWithTag(IN PVOID P
,
756 PPOOL_HEADER Entry
, NextEntry
;
760 PPOOL_DESCRIPTOR PoolDesc
;
761 BOOLEAN Combined
= FALSE
;
764 // Check for paged pool
766 if ((P
>= MmPagedPoolBase
) &&
767 (P
<= (PVOID
)((ULONG_PTR
)MmPagedPoolBase
+ MmPagedPoolSize
)))
777 // Quickly deal with big page allocations
779 if (PAGE_ALIGN(P
) == P
)
786 // Get the entry for this pool allocation
787 // The pointer math here may look wrong or confusing, but it is quite right
793 // Get the size of the entry, and it's pool type, then load the descriptor
794 // for this pool type
796 BlockSize
= Entry
->BlockSize
;
797 PoolType
= (Entry
->PoolType
- 1) & BASE_POOL_TYPE_MASK
;
798 PoolDesc
= PoolVector
[PoolType
];
801 // Get the pointer to the next entry
803 NextEntry
= POOL_BLOCK(Entry
, BlockSize
);
806 // Acquire the pool lock
808 OldIrql
= ExLockPool(PoolDesc
);
811 // Check if the next allocation is at the end of the page
813 ExpCheckPoolBlocks(Entry
);
814 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
817 // We may be able to combine the block if it's free
819 if (NextEntry
->PoolType
== 0)
822 // The next block is free, so we'll do a combine
827 // Make sure there's actual data in the block -- anything smaller
828 // than this means we only have the header, so there's no linked list
831 if ((NextEntry
->BlockSize
!= 1))
834 // The block is at least big enough to have a linked list, so go
835 // ahead and remove it
837 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry
));
838 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry
));
839 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Flink
));
840 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Blink
));
844 // Our entry is now combined with the next entry
846 Entry
->BlockSize
= Entry
->BlockSize
+ NextEntry
->BlockSize
;
851 // Now check if there was a previous entry on the same page as us
853 if (Entry
->PreviousSize
)
856 // Great, grab that entry and check if it's free
858 NextEntry
= POOL_PREV_BLOCK(Entry
);
859 if (NextEntry
->PoolType
== 0)
862 // It is, so we can do a combine
867 // Make sure there's actual data in the block -- anything smaller
868 // than this means we only have the header so there's no linked list
871 if ((NextEntry
->BlockSize
!= 1))
874 // The block is at least big enough to have a linked list, so go
875 // ahead and remove it
877 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry
));
878 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry
));
879 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Flink
));
880 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Blink
));
884 // Combine our original block (which might've already been combined
885 // with the next block), into the previous block
887 NextEntry
->BlockSize
= NextEntry
->BlockSize
+ Entry
->BlockSize
;
890 // And now we'll work with the previous block instead
897 // By now, it may have been possible for our combined blocks to actually
898 // have made up a full page (if there were only 2-3 allocations on the
899 // page, they could've all been combined).
901 if ((PAGE_ALIGN(Entry
) == Entry
) &&
902 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry
)) == POOL_NEXT_BLOCK(Entry
)))
905 // In this case, release the pool lock, and free the page
907 ExUnlockPool(PoolDesc
, OldIrql
);
908 MiFreePoolPages(Entry
);
913 // Otherwise, we now have a free block (or a combination of 2 or 3)
916 BlockSize
= Entry
->BlockSize
;
917 ASSERT(BlockSize
!= 1);
920 // Check if we actually did combine it with anyone
925 // Get the first combined block (either our original to begin with, or
926 // the one after the original, depending if we combined with the previous)
928 NextEntry
= POOL_NEXT_BLOCK(Entry
);
931 // As long as the next block isn't on a page boundary, have it point
934 if (PAGE_ALIGN(NextEntry
) != NextEntry
) NextEntry
->PreviousSize
= BlockSize
;
938 // Insert this new free block, and release the pool lock
940 ExpInsertPoolHeadList(&PoolDesc
->ListHeads
[BlockSize
- 1], POOL_FREE_BLOCK(Entry
));
941 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry
));
942 ExUnlockPool(PoolDesc
, OldIrql
);
953 // Just free without checking for the tag
955 ExFreePoolWithTag(P
, 0);
963 ExQueryPoolBlockSize(IN PVOID PoolBlock
,
964 OUT PBOOLEAN QuotaCharged
)
979 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType
,
980 IN SIZE_T NumberOfBytes
)
985 return ExAllocatePoolWithQuotaTag(PoolType
, NumberOfBytes
, 'enoN');
993 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType
,
994 IN SIZE_T NumberOfBytes
,
996 IN EX_POOL_PRIORITY Priority
)
1002 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);
1010 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType
,
1011 IN SIZE_T NumberOfBytes
,
1015 // Allocate the pool
1018 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);