/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
9 /* INCLUDES *******************************************************************/
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "../ARM3/miarm.h"
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
21 /* GLOBALS ********************************************************************/
23 ULONG ExpNumberOfPagedPools
;
24 POOL_DESCRIPTOR NonPagedPoolDescriptor
;
25 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor
[16 + 1];
26 PPOOL_DESCRIPTOR PoolVector
[2];
28 PKGUARDED_MUTEX ExpPagedPoolMutex
;
/* Pool block/header/list access macros */
/* Header that precedes a client allocation pointer x */
#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
/* LIST_ENTRY stored in the data area of a free block whose header is x */
#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
/* Header located i pool-block units after header x */
#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
/* Header of the block that follows x on the page (x's size in block units) */
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
/* Header of the block that precedes x on the page */
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
55 ExpDecodePoolLink(IN PLIST_ENTRY Link
)
57 return (PLIST_ENTRY
)((ULONG_PTR
)Link
& ~1);
62 ExpEncodePoolLink(IN PLIST_ENTRY Link
)
64 return (PLIST_ENTRY
)((ULONG_PTR
)Link
| 1);
69 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead
)
71 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Flink
)->Blink
) != ListHead
) ||
72 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Blink
)->Flink
) != ListHead
))
74 KeBugCheckEx(BAD_POOL_HEADER
,
77 (ULONG_PTR
)ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Flink
)->Blink
),
78 (ULONG_PTR
)ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Blink
)->Flink
));
84 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead
)
86 ListHead
->Flink
= ListHead
->Blink
= ExpEncodePoolLink(ListHead
);
91 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead
)
93 return (ExpDecodePoolLink(ListHead
->Flink
) == ListHead
);
98 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry
)
100 PLIST_ENTRY Blink
, Flink
;
101 Flink
= ExpDecodePoolLink(Entry
->Flink
);
102 Blink
= ExpDecodePoolLink(Entry
->Blink
);
103 Flink
->Blink
= ExpEncodePoolLink(Blink
);
104 Blink
->Flink
= ExpEncodePoolLink(Flink
);
109 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead
)
111 PLIST_ENTRY Entry
, Flink
;
112 Entry
= ExpDecodePoolLink(ListHead
->Flink
);
113 Flink
= ExpDecodePoolLink(Entry
->Flink
);
114 ListHead
->Flink
= ExpEncodePoolLink(Flink
);
115 Flink
->Blink
= ExpEncodePoolLink(ListHead
);
121 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead
)
123 PLIST_ENTRY Entry
, Blink
;
124 Entry
= ExpDecodePoolLink(ListHead
->Blink
);
125 Blink
= ExpDecodePoolLink(Entry
->Blink
);
126 ListHead
->Blink
= ExpEncodePoolLink(Blink
);
127 Blink
->Flink
= ExpEncodePoolLink(ListHead
);
133 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead
,
134 IN PLIST_ENTRY Entry
)
137 ExpCheckPoolLinks(ListHead
);
138 Blink
= ExpDecodePoolLink(ListHead
->Blink
);
139 Entry
->Flink
= ExpEncodePoolLink(ListHead
);
140 Entry
->Blink
= ExpEncodePoolLink(Blink
);
141 Blink
->Flink
= ExpEncodePoolLink(Entry
);
142 ListHead
->Blink
= ExpEncodePoolLink(Entry
);
143 ExpCheckPoolLinks(ListHead
);
148 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead
,
149 IN PLIST_ENTRY Entry
)
152 ExpCheckPoolLinks(ListHead
);
153 Flink
= ExpDecodePoolLink(ListHead
->Flink
);
154 Entry
->Flink
= ExpEncodePoolLink(Flink
);
155 Entry
->Blink
= ExpEncodePoolLink(ListHead
);
156 Flink
->Blink
= ExpEncodePoolLink(Entry
);
157 ListHead
->Flink
= ExpEncodePoolLink(Entry
);
158 ExpCheckPoolLinks(ListHead
);
163 ExpCheckPoolHeader(IN PPOOL_HEADER Entry
)
165 PPOOL_HEADER PreviousEntry
, NextEntry
;
167 /* Is there a block before this one? */
168 if (Entry
->PreviousSize
)
171 PreviousEntry
= POOL_PREV_BLOCK(Entry
);
173 /* The two blocks must be on the same page! */
174 if (PAGE_ALIGN(Entry
) != PAGE_ALIGN(PreviousEntry
))
176 /* Something is awry */
177 KeBugCheckEx(BAD_POOL_HEADER
,
179 (ULONG_PTR
)PreviousEntry
,
184 /* This block should also indicate that it's as large as we think it is */
185 if (PreviousEntry
->BlockSize
!= Entry
->PreviousSize
)
187 /* Otherwise, someone corrupted one of the sizes */
188 KeBugCheckEx(BAD_POOL_HEADER
,
190 (ULONG_PTR
)PreviousEntry
,
195 else if (PAGE_ALIGN(Entry
) != Entry
)
197 /* If there's no block before us, we are the first block, so we should be on a page boundary */
198 KeBugCheckEx(BAD_POOL_HEADER
,
205 /* This block must have a size */
206 if (!Entry
->BlockSize
)
208 /* Someone must've corrupted this field */
209 KeBugCheckEx(BAD_POOL_HEADER
,
216 /* Okay, now get the next block */
217 NextEntry
= POOL_NEXT_BLOCK(Entry
);
219 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
220 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
222 /* The two blocks must be on the same page! */
223 if (PAGE_ALIGN(Entry
) != PAGE_ALIGN(NextEntry
))
225 /* Something is messed up */
226 KeBugCheckEx(BAD_POOL_HEADER
,
228 (ULONG_PTR
)NextEntry
,
233 /* And this block should think we are as large as we truly are */
234 if (NextEntry
->PreviousSize
!= Entry
->BlockSize
)
236 /* Otherwise, someone corrupted the field */
237 KeBugCheckEx(BAD_POOL_HEADER
,
239 (ULONG_PTR
)NextEntry
,
248 ExpCheckPoolBlocks(IN PVOID Block
)
250 BOOLEAN FoundBlock
= FALSE
;
254 /* Get the first entry for this page, make sure it really is the first */
255 Entry
= PAGE_ALIGN(Block
);
256 ASSERT(Entry
->PreviousSize
== 0);
258 /* Now scan each entry */
261 /* When we actually found our block, remember this */
262 if (Entry
== Block
) FoundBlock
= TRUE
;
264 /* Now validate this block header */
265 ExpCheckPoolHeader(Entry
);
267 /* And go to the next one, keeping track of our size */
268 Size
+= Entry
->BlockSize
;
269 Entry
= POOL_NEXT_BLOCK(Entry
);
271 /* If we hit the last block, stop */
272 if (Size
>= (PAGE_SIZE
/ POOL_BLOCK_SIZE
)) break;
274 /* If we hit the end of the page, stop */
275 if (PAGE_ALIGN(Entry
) == Entry
) break;
278 /* We must've found our block, and we must have hit the end of the page */
279 if ((PAGE_ALIGN(Entry
) != Entry
) || !(FoundBlock
))
281 /* Otherwise, the blocks are messed up */
282 KeBugCheckEx(BAD_POOL_HEADER
, 10, (ULONG_PTR
)Block
, __LINE__
, (ULONG_PTR
)Entry
);
286 /* PRIVATE FUNCTIONS **********************************************************/
291 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor
,
292 IN POOL_TYPE PoolType
,
297 PLIST_ENTRY NextEntry
, LastEntry
;
300 // Setup the descriptor based on the caller's request
302 PoolDescriptor
->PoolType
= PoolType
;
303 PoolDescriptor
->PoolIndex
= PoolIndex
;
304 PoolDescriptor
->Threshold
= Threshold
;
305 PoolDescriptor
->LockAddress
= PoolLock
;
308 // Initialize accounting data
310 PoolDescriptor
->RunningAllocs
= 0;
311 PoolDescriptor
->RunningDeAllocs
= 0;
312 PoolDescriptor
->TotalPages
= 0;
313 PoolDescriptor
->TotalBytes
= 0;
314 PoolDescriptor
->TotalBigPages
= 0;
317 // Nothing pending for now
319 PoolDescriptor
->PendingFrees
= NULL
;
320 PoolDescriptor
->PendingFreeDepth
= 0;
323 // Loop all the descriptor's allocation lists and initialize them
325 NextEntry
= PoolDescriptor
->ListHeads
;
326 LastEntry
= NextEntry
+ POOL_LISTS_PER_PAGE
;
327 while (NextEntry
< LastEntry
)
329 ExpInitializePoolListHead(NextEntry
);
337 InitializePool(IN POOL_TYPE PoolType
,
340 PPOOL_DESCRIPTOR Descriptor
;
343 // Check what kind of pool this is
345 if (PoolType
== NonPagedPool
)
348 // Initialize the nonpaged pool descriptor
350 PoolVector
[NonPagedPool
] = &NonPagedPoolDescriptor
;
351 ExInitializePoolDescriptor(PoolVector
[NonPagedPool
],
360 // Allocate the pool descriptor
362 Descriptor
= ExAllocatePoolWithTag(NonPagedPool
,
363 sizeof(KGUARDED_MUTEX
) +
364 sizeof(POOL_DESCRIPTOR
),
369 // This is really bad...
371 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
379 // Setup the vector and guarded mutex for paged pool
381 PoolVector
[PagedPool
] = Descriptor
;
382 ExpPagedPoolMutex
= (PKGUARDED_MUTEX
)(Descriptor
+ 1);
383 KeInitializeGuardedMutex(ExpPagedPoolMutex
);
384 ExInitializePoolDescriptor(Descriptor
,
394 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor
)
397 // Check if this is nonpaged pool
399 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
402 // Use the queued spin lock
404 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock
);
409 // Use the guarded mutex
411 KeAcquireGuardedMutex(Descriptor
->LockAddress
);
418 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor
,
422 // Check if this is nonpaged pool
424 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
427 // Use the queued spin lock
429 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
434 // Use the guarded mutex
436 KeReleaseGuardedMutex(Descriptor
->LockAddress
);
440 /* PUBLIC FUNCTIONS ***********************************************************/
447 ExAllocatePoolWithTag(IN POOL_TYPE PoolType
,
448 IN SIZE_T NumberOfBytes
,
451 PPOOL_DESCRIPTOR PoolDesc
;
452 PLIST_ENTRY ListHead
;
453 PPOOL_HEADER Entry
, NextEntry
, FragmentEntry
;
458 // Some sanity checks
461 ASSERT(Tag
!= ' GIB');
462 ASSERT(NumberOfBytes
!= 0);
465 // Get the pool type and its corresponding vector for this request
467 PoolType
= PoolType
& BASE_POOL_TYPE_MASK
;
468 PoolDesc
= PoolVector
[PoolType
];
469 ASSERT(PoolDesc
!= NULL
);
472 // Check if this is a special pool allocation
474 if (MmUseSpecialPool(NumberOfBytes
, Tag
))
477 // Try to allocate using special pool
479 Entry
= MmAllocateSpecialPool(NumberOfBytes
, Tag
, PoolType
, 2);
480 if (Entry
) return Entry
;
484 // Check if this is a big page allocation
486 if (NumberOfBytes
> POOL_MAX_ALLOC
)
489 // Then just return the number of pages requested
491 return MiAllocatePoolPages(PoolType
, NumberOfBytes
);
495 // Should never request 0 bytes from the pool, but since so many drivers do
496 // it, we'll just assume they want 1 byte, based on NT's similar behavior
498 if (!NumberOfBytes
) NumberOfBytes
= 1;
501 // A pool allocation is defined by its data, a linked list to connect it to
502 // the free list (if necessary), and a pool header to store accounting info.
503 // Calculate this size, then convert it into a block size (units of pool
506 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
507 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
508 // the direct allocation of pages.
510 i
= (USHORT
)((NumberOfBytes
+ sizeof(POOL_HEADER
) + (POOL_BLOCK_SIZE
- 1))
514 // Loop in the free lists looking for a block if this size. Start with the
515 // list optimized for this kind of size lookup
517 ListHead
= &PoolDesc
->ListHeads
[i
];
521 // Are there any free entries available on this list?
523 if (!ExpIsPoolListEmpty(ListHead
))
526 // Acquire the pool lock now
528 OldIrql
= ExLockPool(PoolDesc
);
531 // And make sure the list still has entries
533 if (ExpIsPoolListEmpty(ListHead
))
536 // Someone raced us (and won) before we had a chance to acquire
541 ExUnlockPool(PoolDesc
, OldIrql
);
547 // Remove a free entry from the list
548 // Note that due to the way we insert free blocks into multiple lists
549 // there is a guarantee that any block on this list will either be
550 // of the correct size, or perhaps larger.
552 ExpCheckPoolLinks(ListHead
);
553 Entry
= POOL_ENTRY(ExpRemovePoolHeadList(ListHead
));
554 ExpCheckPoolLinks(ListHead
);
555 ExpCheckPoolBlocks(Entry
);
556 ASSERT(Entry
->BlockSize
>= i
);
557 ASSERT(Entry
->PoolType
== 0);
560 // Check if this block is larger that what we need. The block could
561 // not possibly be smaller, due to the reason explained above (and
562 // we would've asserted on a checked build if this was the case).
564 if (Entry
->BlockSize
!= i
)
567 // Is there an entry before this one?
569 if (Entry
->PreviousSize
== 0)
572 // There isn't anyone before us, so take the next block and
573 // turn it into a fragment that contains the leftover data
574 // that we don't need to satisfy the caller's request
576 FragmentEntry
= POOL_BLOCK(Entry
, i
);
577 FragmentEntry
->BlockSize
= Entry
->BlockSize
- i
;
580 // And make it point back to us
582 FragmentEntry
->PreviousSize
= i
;
585 // Now get the block that follows the new fragment and check
586 // if it's still on the same page as us (and not at the end)
588 NextEntry
= POOL_NEXT_BLOCK(FragmentEntry
);
589 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
592 // Adjust this next block to point to our newly created
595 NextEntry
->PreviousSize
= FragmentEntry
->BlockSize
;
601 // There is a free entry before us, which we know is smaller
602 // so we'll make this entry the fragment instead
604 FragmentEntry
= Entry
;
607 // And then we'll remove from it the actual size required.
608 // Now the entry is a leftover free fragment
610 Entry
->BlockSize
-= i
;
613 // Now let's go to the next entry after the fragment (which
614 // used to point to our original free entry) and make it
615 // reference the new fragment entry instead.
617 // This is the entry that will actually end up holding the
620 Entry
= POOL_NEXT_BLOCK(Entry
);
621 Entry
->PreviousSize
= FragmentEntry
->BlockSize
;
624 // And now let's go to the entry after that one and check if
625 // it's still on the same page, and not at the end
627 NextEntry
= POOL_BLOCK(Entry
, i
);
628 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
631 // Make it reference the allocation entry
633 NextEntry
->PreviousSize
= i
;
638 // Now our (allocation) entry is the right size
640 Entry
->BlockSize
= i
;
643 // And the next entry is now the free fragment which contains
644 // the remaining difference between how big the original entry
645 // was, and the actual size the caller needs/requested.
647 FragmentEntry
->PoolType
= 0;
648 BlockSize
= FragmentEntry
->BlockSize
;
651 // Now check if enough free bytes remained for us to have a
652 // "full" entry, which contains enough bytes for a linked list
653 // and thus can be used for allocations (up to 8 bytes...)
655 ExpCheckPoolLinks(&PoolDesc
->ListHeads
[BlockSize
- 1]);
659 // Insert the free entry into the free list for this size
661 ExpInsertPoolTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
662 POOL_FREE_BLOCK(FragmentEntry
));
663 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry
));
668 // We have found an entry for this allocation, so set the pool type
669 // and release the lock since we're done
671 Entry
->PoolType
= PoolType
+ 1;
672 ExpCheckPoolBlocks(Entry
);
673 ExUnlockPool(PoolDesc
, OldIrql
);
676 // Return the pool allocation
678 Entry
->PoolTag
= Tag
;
679 (POOL_FREE_BLOCK(Entry
))->Flink
= NULL
;
680 (POOL_FREE_BLOCK(Entry
))->Blink
= NULL
;
681 return POOL_FREE_BLOCK(Entry
);
683 } while (++ListHead
!= &PoolDesc
->ListHeads
[POOL_LISTS_PER_PAGE
]);
686 // There were no free entries left, so we have to allocate a new fresh page
688 Entry
= MiAllocatePoolPages(PoolType
, PAGE_SIZE
);
693 Entry
->BlockSize
= i
;
694 Entry
->PoolType
= PoolType
+ 1;
697 // This page will have two entries -- one for the allocation (which we just
698 // created above), and one for the remaining free bytes, which we're about
699 // to create now. The free bytes are the whole page minus what was allocated
700 // and then converted into units of block headers.
702 BlockSize
= (PAGE_SIZE
/ POOL_BLOCK_SIZE
) - i
;
703 FragmentEntry
= POOL_BLOCK(Entry
, i
);
704 FragmentEntry
->Ulong1
= 0;
705 FragmentEntry
->BlockSize
= BlockSize
;
706 FragmentEntry
->PreviousSize
= i
;
709 // Now check if enough free bytes remained for us to have a "full" entry,
710 // which contains enough bytes for a linked list and thus can be used for
711 // allocations (up to 8 bytes...)
713 if (FragmentEntry
->BlockSize
!= 1)
716 // Excellent -- acquire the pool lock
718 OldIrql
= ExLockPool(PoolDesc
);
721 // And insert the free entry into the free list for this block size
723 ExpCheckPoolLinks(&PoolDesc
->ListHeads
[BlockSize
- 1]);
724 ExpInsertPoolTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
725 POOL_FREE_BLOCK(FragmentEntry
));
726 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry
));
729 // Release the pool lock
731 ExpCheckPoolBlocks(Entry
);
732 ExUnlockPool(PoolDesc
, OldIrql
);
736 // And return the pool allocation
738 ExpCheckPoolBlocks(Entry
);
739 Entry
->PoolTag
= Tag
;
740 return POOL_FREE_BLOCK(Entry
);
748 ExAllocatePool(POOL_TYPE PoolType
,
749 SIZE_T NumberOfBytes
)
752 // Use a default tag of "None"
754 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, TAG_NONE
);
762 ExFreePoolWithTag(IN PVOID P
,
765 PPOOL_HEADER Entry
, NextEntry
;
769 PPOOL_DESCRIPTOR PoolDesc
;
770 BOOLEAN Combined
= FALSE
;
773 // Check if it was allocated from a special pool
775 if (MmIsSpecialPoolAddress(P
))
778 // It is, so handle it via special pool free routine
780 MmFreeSpecialPool(P
);
785 // Quickly deal with big page allocations
787 if (PAGE_ALIGN(P
) == P
)
794 // Get the entry for this pool allocation
795 // The pointer math here may look wrong or confusing, but it is quite right
801 // Get the size of the entry, and it's pool type, then load the descriptor
802 // for this pool type
804 BlockSize
= Entry
->BlockSize
;
805 PoolType
= (Entry
->PoolType
- 1) & BASE_POOL_TYPE_MASK
;
806 PoolDesc
= PoolVector
[PoolType
];
809 // Get the pointer to the next entry
811 NextEntry
= POOL_BLOCK(Entry
, BlockSize
);
814 // Acquire the pool lock
816 OldIrql
= ExLockPool(PoolDesc
);
821 if (TagToFree
&& TagToFree
!= Entry
->PoolTag
)
823 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree
, (char*)&Entry
->PoolTag
);
824 KeBugCheckEx(BAD_POOL_CALLER
, 0x0A, (ULONG_PTR
)P
, Entry
->PoolTag
, TagToFree
);
828 // Check if the next allocation is at the end of the page
830 ExpCheckPoolBlocks(Entry
);
831 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
834 // We may be able to combine the block if it's free
836 if (NextEntry
->PoolType
== 0)
839 // The next block is free, so we'll do a combine
844 // Make sure there's actual data in the block -- anything smaller
845 // than this means we only have the header, so there's no linked list
848 if ((NextEntry
->BlockSize
!= 1))
851 // The block is at least big enough to have a linked list, so go
852 // ahead and remove it
854 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry
));
855 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry
));
856 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Flink
));
857 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Blink
));
861 // Our entry is now combined with the next entry
863 Entry
->BlockSize
= Entry
->BlockSize
+ NextEntry
->BlockSize
;
868 // Now check if there was a previous entry on the same page as us
870 if (Entry
->PreviousSize
)
873 // Great, grab that entry and check if it's free
875 NextEntry
= POOL_PREV_BLOCK(Entry
);
876 if (NextEntry
->PoolType
== 0)
879 // It is, so we can do a combine
884 // Make sure there's actual data in the block -- anything smaller
885 // than this means we only have the header so there's no linked list
888 if ((NextEntry
->BlockSize
!= 1))
891 // The block is at least big enough to have a linked list, so go
892 // ahead and remove it
894 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry
));
895 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry
));
896 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Flink
));
897 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Blink
));
901 // Combine our original block (which might've already been combined
902 // with the next block), into the previous block
904 NextEntry
->BlockSize
= NextEntry
->BlockSize
+ Entry
->BlockSize
;
907 // And now we'll work with the previous block instead
914 // By now, it may have been possible for our combined blocks to actually
915 // have made up a full page (if there were only 2-3 allocations on the
916 // page, they could've all been combined).
918 if ((PAGE_ALIGN(Entry
) == Entry
) &&
919 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry
)) == POOL_NEXT_BLOCK(Entry
)))
922 // In this case, release the pool lock, and free the page
924 ExUnlockPool(PoolDesc
, OldIrql
);
925 MiFreePoolPages(Entry
);
930 // Otherwise, we now have a free block (or a combination of 2 or 3)
933 BlockSize
= Entry
->BlockSize
;
934 ASSERT(BlockSize
!= 1);
937 // Check if we actually did combine it with anyone
942 // Get the first combined block (either our original to begin with, or
943 // the one after the original, depending if we combined with the previous)
945 NextEntry
= POOL_NEXT_BLOCK(Entry
);
948 // As long as the next block isn't on a page boundary, have it point
951 if (PAGE_ALIGN(NextEntry
) != NextEntry
) NextEntry
->PreviousSize
= BlockSize
;
955 // Insert this new free block, and release the pool lock
957 ExpInsertPoolHeadList(&PoolDesc
->ListHeads
[BlockSize
- 1], POOL_FREE_BLOCK(Entry
));
958 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry
));
959 ExUnlockPool(PoolDesc
, OldIrql
);
970 // Just free without checking for the tag
972 ExFreePoolWithTag(P
, 0);
980 ExQueryPoolBlockSize(IN PVOID PoolBlock
,
981 OUT PBOOLEAN QuotaCharged
)
996 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType
,
997 IN SIZE_T NumberOfBytes
)
1000 // Allocate the pool
1002 return ExAllocatePoolWithQuotaTag(PoolType
, NumberOfBytes
, 'enoN');
1010 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType
,
1011 IN SIZE_T NumberOfBytes
,
1013 IN EX_POOL_PRIORITY Priority
)
1016 // Allocate the pool
1019 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);
1027 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType
,
1028 IN SIZE_T NumberOfBytes
,
1032 // Allocate the pool
1035 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);