/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/
15 #line 15 "ARMĀ³::EXPOOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
19 #undef ExAllocatePoolWithQuota
20 #undef ExAllocatePoolWithQuotaTag
/* GLOBALS ********************************************************************/
24 ULONG ExpNumberOfPagedPools
;
25 POOL_DESCRIPTOR NonPagedPoolDescriptor
;
26 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor
[16 + 1];
27 PPOOL_DESCRIPTOR PoolVector
[2];
29 PKGUARDED_MUTEX ExpPagedPoolMutex
;
/* Pool block/header/list access macros */
/* Pool layout navigation. A POOL_HEADER immediately precedes each user block;
 * BlockSize/PreviousSize are expressed in POOL_BLOCK_SIZE units.
 * All arguments are parenthesized to avoid macro-expansion precedence bugs. */
#define POOL_ENTRY(x)      (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)   (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
/* PRIVATE FUNCTIONS **********************************************************/
42 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor
,
43 IN POOL_TYPE PoolType
,
48 PLIST_ENTRY NextEntry
, LastEntry
;
51 // Setup the descriptor based on the caller's request
53 PoolDescriptor
->PoolType
= PoolType
;
54 PoolDescriptor
->PoolIndex
= PoolIndex
;
55 PoolDescriptor
->Threshold
= Threshold
;
56 PoolDescriptor
->LockAddress
= PoolLock
;
59 // Initialize accounting data
61 PoolDescriptor
->RunningAllocs
= 0;
62 PoolDescriptor
->RunningDeAllocs
= 0;
63 PoolDescriptor
->TotalPages
= 0;
64 PoolDescriptor
->TotalBytes
= 0;
65 PoolDescriptor
->TotalBigPages
= 0;
68 // Nothing pending for now
70 PoolDescriptor
->PendingFrees
= NULL
;
71 PoolDescriptor
->PendingFreeDepth
= 0;
74 // Loop all the descriptor's allocation lists and initialize them
76 NextEntry
= PoolDescriptor
->ListHeads
;
77 LastEntry
= NextEntry
+ POOL_LISTS_PER_PAGE
;
78 while (NextEntry
< LastEntry
)
80 InitializeListHead(NextEntry
);
87 InitializePool(IN POOL_TYPE PoolType
,
90 PPOOL_DESCRIPTOR Descriptor
;
93 // Check what kind of pool this is
95 if (PoolType
== NonPagedPool
)
98 // Initialize the nonpaged pool descriptor
100 PoolVector
[NonPagedPool
] = &NonPagedPoolDescriptor
;
101 ExInitializePoolDescriptor(PoolVector
[NonPagedPool
],
110 // Allocate the pool descriptor
112 Descriptor
= ExAllocatePoolWithTag(NonPagedPool
,
113 sizeof(KGUARDED_MUTEX
) +
114 sizeof(POOL_DESCRIPTOR
),
119 // This is really bad...
121 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
129 // Setup the vector and guarded mutex for paged pool
131 PoolVector
[PagedPool
] = Descriptor
;
132 ExpPagedPoolMutex
= (PKGUARDED_MUTEX
)(Descriptor
+ 1);
133 KeInitializeGuardedMutex(ExpPagedPoolMutex
);
134 ExInitializePoolDescriptor(Descriptor
,
144 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor
)
147 // Check if this is nonpaged pool
149 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
152 // Use the queued spin lock
154 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock
);
159 // Use the guarded mutex
161 KeAcquireGuardedMutex(Descriptor
->LockAddress
);
168 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor
,
172 // Check if this is nonpaged pool
174 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
177 // Use the queued spin lock
179 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
184 // Use the guarded mutex
186 KeReleaseGuardedMutex(Descriptor
->LockAddress
);
/* PUBLIC FUNCTIONS ***********************************************************/
197 ExAllocatePoolWithTag(IN POOL_TYPE PoolType
,
198 IN SIZE_T NumberOfBytes
,
201 PPOOL_DESCRIPTOR PoolDesc
;
202 PLIST_ENTRY ListHead
;
203 PPOOL_HEADER Entry
, NextEntry
, FragmentEntry
;
208 // Check for paged pool
210 if (PoolType
== PagedPool
) return ExAllocatePagedPoolWithTag(PagedPool
, NumberOfBytes
, Tag
);
213 // Some sanity checks
216 ASSERT(Tag
!= ' GIB');
217 ASSERT(NumberOfBytes
!= 0);
220 // Get the pool type and its corresponding vector for this request
222 PoolType
= PoolType
& BASE_POOL_TYPE_MASK
;
223 PoolDesc
= PoolVector
[PoolType
];
224 ASSERT(PoolDesc
!= NULL
);
227 // Check if this is a big page allocation
229 if (NumberOfBytes
> POOL_MAX_ALLOC
)
232 // Then just return the number of pages requested
234 return MiAllocatePoolPages(PoolType
, NumberOfBytes
);
238 // Should never request 0 bytes from the pool, but since so many drivers do
239 // it, we'll just assume they want 1 byte, based on NT's similar behavior
241 if (!NumberOfBytes
) NumberOfBytes
= 1;
244 // A pool allocation is defined by its data, a linked list to connect it to
245 // the free list (if necessary), and a pool header to store accounting info.
246 // Calculate this size, then convert it into a block size (units of pool
249 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
250 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
251 // the direct allocation of pages.
253 i
= (NumberOfBytes
+ sizeof(POOL_HEADER
) + (POOL_BLOCK_SIZE
- 1)) / POOL_BLOCK_SIZE
;
256 // Loop in the free lists looking for a block if this size. Start with the
257 // list optimized for this kind of size lookup
259 ListHead
= &PoolDesc
->ListHeads
[i
];
263 // Are there any free entries available on this list?
265 if (!IsListEmpty(ListHead
))
268 // Acquire the pool lock now
270 OldIrql
= ExLockPool(PoolDesc
);
273 // And make sure the list still has entries
275 if (IsListEmpty(ListHead
))
278 // Someone raced us (and won) before we had a chance to acquire
283 ExUnlockPool(PoolDesc
, OldIrql
);
289 // Remove a free entry from the list
290 // Note that due to the way we insert free blocks into multiple lists
291 // there is a guarantee that any block on this list will either be
292 // of the correct size, or perhaps larger.
294 Entry
= POOL_ENTRY(RemoveHeadList(ListHead
));
295 ASSERT(Entry
->BlockSize
>= i
);
296 ASSERT(Entry
->PoolType
== 0);
299 // Check if this block is larger that what we need. The block could
300 // not possibly be smaller, due to the reason explained above (and
301 // we would've asserted on a checked build if this was the case).
303 if (Entry
->BlockSize
!= i
)
306 // Is there an entry before this one?
308 if (Entry
->PreviousSize
== 0)
311 // There isn't anyone before us, so take the next block and
312 // turn it into a fragment that contains the leftover data
313 // that we don't need to satisfy the caller's request
315 FragmentEntry
= POOL_BLOCK(Entry
, i
);
316 FragmentEntry
->BlockSize
= Entry
->BlockSize
- i
;
319 // And make it point back to us
321 FragmentEntry
->PreviousSize
= i
;
324 // Now get the block that follows the new fragment and check
325 // if it's still on the same page as us (and not at the end)
327 NextEntry
= POOL_NEXT_BLOCK(FragmentEntry
);
328 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
331 // Adjust this next block to point to our newly created
334 NextEntry
->PreviousSize
= FragmentEntry
->BlockSize
;
340 // There is a free entry before us, which we know is smaller
341 // so we'll make this entry the fragment instead
343 FragmentEntry
= Entry
;
346 // And then we'll remove from it the actual size required.
347 // Now the entry is a leftover free fragment
349 Entry
->BlockSize
-= i
;
352 // Now let's go to the next entry after the fragment (which
353 // used to point to our original free entry) and make it
354 // reference the new fragment entry instead.
356 // This is the entry that will actually end up holding the
359 Entry
= POOL_NEXT_BLOCK(Entry
);
360 Entry
->PreviousSize
= FragmentEntry
->BlockSize
;
363 // And now let's go to the entry after that one and check if
364 // it's still on the same page, and not at the end
366 NextEntry
= POOL_BLOCK(Entry
, i
);
367 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
370 // Make it reference the allocation entry
372 NextEntry
->PreviousSize
= i
;
377 // Now our (allocation) entry is the right size
379 Entry
->BlockSize
= i
;
382 // And the next entry is now the free fragment which contains
383 // the remaining difference between how big the original entry
384 // was, and the actual size the caller needs/requested.
386 FragmentEntry
->PoolType
= 0;
387 BlockSize
= FragmentEntry
->BlockSize
;
390 // Now check if enough free bytes remained for us to have a
391 // "full" entry, which contains enough bytes for a linked list
392 // and thus can be used for allocations (up to 8 bytes...)
397 // Insert the free entry into the free list for this size
399 InsertTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
400 POOL_FREE_BLOCK(FragmentEntry
));
405 // We have found an entry for this allocation, so set the pool type
406 // and release the lock since we're done
408 Entry
->PoolType
= PoolType
+ 1;
409 ExUnlockPool(PoolDesc
, OldIrql
);
412 // Return the pool allocation
414 Entry
->PoolTag
= Tag
;
415 (POOL_FREE_BLOCK(Entry
))->Flink
= NULL
;
416 (POOL_FREE_BLOCK(Entry
))->Blink
= NULL
;
417 return POOL_FREE_BLOCK(Entry
);
419 } while (++ListHead
!= &PoolDesc
->ListHeads
[POOL_LISTS_PER_PAGE
]);
422 // There were no free entries left, so we have to allocate a new fresh page
424 Entry
= MiAllocatePoolPages(PoolType
, PAGE_SIZE
);
425 ASSERT(Entry
!= NULL
);
427 Entry
->BlockSize
= i
;
428 Entry
->PoolType
= PoolType
+ 1;
431 // This page will have two entries -- one for the allocation (which we just
432 // created above), and one for the remaining free bytes, which we're about
433 // to create now. The free bytes are the whole page minus what was allocated
434 // and then converted into units of block headers.
436 BlockSize
= (PAGE_SIZE
/ POOL_BLOCK_SIZE
) - i
;
437 FragmentEntry
= POOL_BLOCK(Entry
, i
);
438 FragmentEntry
->Ulong1
= 0;
439 FragmentEntry
->BlockSize
= BlockSize
;
440 FragmentEntry
->PreviousSize
= i
;
443 // Now check if enough free bytes remained for us to have a "full" entry,
444 // which contains enough bytes for a linked list and thus can be used for
445 // allocations (up to 8 bytes...)
447 if (FragmentEntry
->BlockSize
!= 1)
450 // Excellent -- acquire the pool lock
452 OldIrql
= ExLockPool(PoolDesc
);
455 // And insert the free entry into the free list for this block size
457 InsertTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
458 POOL_FREE_BLOCK(FragmentEntry
));
461 // Release the pool lock
463 ExUnlockPool(PoolDesc
, OldIrql
);
467 // And return the pool allocation
469 Entry
->PoolTag
= Tag
;
470 return POOL_FREE_BLOCK(Entry
);
478 ExAllocatePool(POOL_TYPE PoolType
,
479 SIZE_T NumberOfBytes
)
482 // Use a default tag of "None"
484 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, 'enoN');
492 ExFreePoolWithTag(IN PVOID P
,
495 PPOOL_HEADER Entry
, NextEntry
;
499 PPOOL_DESCRIPTOR PoolDesc
;
500 BOOLEAN Combined
= FALSE
;
503 // Check for paged pool
505 if ((P
>= MmPagedPoolBase
) &&
506 (P
<= (PVOID
)((ULONG_PTR
)MmPagedPoolBase
+ MmPagedPoolSize
)))
517 // Quickly deal with big page allocations
519 if (PAGE_ALIGN(P
) == P
)
526 // Get the entry for this pool allocation
527 // The pointer math here may look wrong or confusing, but it is quite right
533 // Get the size of the entry, and it's pool type, then load the descriptor
534 // for this pool type
536 BlockSize
= Entry
->BlockSize
;
537 PoolType
= (Entry
->PoolType
- 1) & BASE_POOL_TYPE_MASK
;
538 PoolDesc
= PoolVector
[PoolType
];
541 // Get the pointer to the next entry
543 NextEntry
= POOL_BLOCK(Entry
, BlockSize
);
546 // Acquire the pool lock
548 OldIrql
= ExLockPool(PoolDesc
);
551 // Check if the next allocation is at the end of the page
553 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
556 // We may be able to combine the block if it's free
558 if (NextEntry
->PoolType
== 0)
561 // The next block is free, so we'll do a combine
566 // Make sure there's actual data in the block -- anything smaller
567 // than this means we only have the header, so there's no linked list
570 if ((NextEntry
->BlockSize
!= 1))
573 // The block is at least big enough to have a linked list, so go
574 // ahead and remove it
576 RemoveEntryList(POOL_FREE_BLOCK(NextEntry
));
580 // Our entry is now combined with the next entry
582 Entry
->BlockSize
= Entry
->BlockSize
+ NextEntry
->BlockSize
;
587 // Now check if there was a previous entry on the same page as us
589 if (Entry
->PreviousSize
)
592 // Great, grab that entry and check if it's free
594 NextEntry
= POOL_PREV_BLOCK(Entry
);
595 if (NextEntry
->PoolType
== 0)
598 // It is, so we can do a combine
603 // Make sure there's actual data in the block -- anything smaller
604 // than this means we only have the header so there's no linked list
607 if ((NextEntry
->BlockSize
!= 1))
610 // The block is at least big enough to have a linked list, so go
611 // ahead and remove it
613 RemoveEntryList(POOL_FREE_BLOCK(NextEntry
));
617 // Combine our original block (which might've already been combined
618 // with the next block), into the previous block
620 NextEntry
->BlockSize
= NextEntry
->BlockSize
+ Entry
->BlockSize
;
623 // And now we'll work with the previous block instead
630 // By now, it may have been possible for our combined blocks to actually
631 // have made up a full page (if there were only 2-3 allocations on the
632 // page, they could've all been combined).
634 if ((PAGE_ALIGN(Entry
) == Entry
) &&
635 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry
)) == POOL_NEXT_BLOCK(Entry
)))
638 // In this case, release the pool lock, and free the page
640 ExUnlockPool(PoolDesc
, OldIrql
);
641 MiFreePoolPages(Entry
);
646 // Otherwise, we now have a free block (or a combination of 2 or 3)
649 BlockSize
= Entry
->BlockSize
;
650 ASSERT(BlockSize
!= 1);
653 // Check if we actually did combine it with anyone
658 // Get the first combined block (either our original to begin with, or
659 // the one after the original, depending if we combined with the previous)
661 NextEntry
= POOL_NEXT_BLOCK(Entry
);
664 // As long as the next block isn't on a page boundary, have it point
667 if (PAGE_ALIGN(NextEntry
) != NextEntry
) NextEntry
->PreviousSize
= BlockSize
;
671 // Insert this new free block, and release the pool lock
673 InsertHeadList(&PoolDesc
->ListHeads
[BlockSize
- 1], POOL_FREE_BLOCK(Entry
));
674 ExUnlockPool(PoolDesc
, OldIrql
);
685 // Just free without checking for the tag
687 ExFreePoolWithTag(P
, 0);
695 ExQueryPoolBlockSize(IN PVOID PoolBlock
,
696 OUT PBOOLEAN QuotaCharged
)
711 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType
,
712 IN SIZE_T NumberOfBytes
)
717 return ExAllocatePoolWithQuotaTag(PoolType
, NumberOfBytes
, 'enoN');
725 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType
,
726 IN SIZE_T NumberOfBytes
,
728 IN EX_POOL_PRIORITY Priority
)
734 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);
742 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType
,
743 IN SIZE_T NumberOfBytes
,
750 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);