/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
9 /* INCLUDES *******************************************************************/
#line 15 "ARM³::EXPOOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
19 #undef ExAllocatePoolWithQuota
20 #undef ExAllocatePoolWithQuotaTag
22 /* GLOBALS ********************************************************************/
ULONG ExpNumberOfPagedPools;                        // Count of paged pools in the system
POOL_DESCRIPTOR NonPagedPoolDescriptor;             // The single, statically allocated nonpaged pool descriptor
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];    // Paged pool descriptors -- presumably one per pool plus one; not referenced in this chunk, confirm indexing against full file
PPOOL_DESCRIPTOR PoolVector[2];                     // Descriptor lookup indexed by base POOL_TYPE (NonPagedPool / PagedPool)
PKGUARDED_MUTEX ExpPagedPoolMutex;                  // Guarded mutex protecting the paged pool descriptor (lives right after it in memory)
31 /* PRIVATE FUNCTIONS **********************************************************/
35 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor
,
36 IN POOL_TYPE PoolType
,
41 PLIST_ENTRY NextEntry
, LastEntry
;
44 // Setup the descriptor based on the caller's request
46 PoolDescriptor
->PoolType
= PoolType
;
47 PoolDescriptor
->PoolIndex
= PoolIndex
;
48 PoolDescriptor
->Threshold
= Threshold
;
49 PoolDescriptor
->LockAddress
= PoolLock
;
52 // Initialize accounting data
54 PoolDescriptor
->RunningAllocs
= 0;
55 PoolDescriptor
->RunningDeAllocs
= 0;
56 PoolDescriptor
->TotalPages
= 0;
57 PoolDescriptor
->TotalBytes
= 0;
58 PoolDescriptor
->TotalBigPages
= 0;
61 // Nothing pending for now
63 PoolDescriptor
->PendingFrees
= NULL
;
64 PoolDescriptor
->PendingFreeDepth
= 0;
67 // Loop all the descriptor's allocation lists and initialize them
69 NextEntry
= PoolDescriptor
->ListHeads
;
70 LastEntry
= NextEntry
+ POOL_LISTS_PER_PAGE
;
71 while (NextEntry
< LastEntry
) InitializeListHead(NextEntry
++);
76 InitializePool(IN POOL_TYPE PoolType
,
79 PPOOL_DESCRIPTOR Descriptor
;
82 // Check what kind of pool this is
84 if (PoolType
== NonPagedPool
)
87 // Initialize the nonpaged pool descriptor
89 PoolVector
[NonPagedPool
] = &NonPagedPoolDescriptor
;
90 ExInitializePoolDescriptor(PoolVector
[NonPagedPool
],
99 // Allocate the pool descriptor
101 Descriptor
= ExAllocatePoolWithTag(NonPagedPool
,
102 sizeof(KGUARDED_MUTEX
) +
103 sizeof(POOL_DESCRIPTOR
),
108 // This is really bad...
110 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
118 // Setup the vector and guarded mutex for paged pool
120 PoolVector
[PagedPool
] = Descriptor
;
121 ExpPagedPoolMutex
= (PKGUARDED_MUTEX
)(Descriptor
+ 1);
122 KeInitializeGuardedMutex(ExpPagedPoolMutex
);
123 ExInitializePoolDescriptor(Descriptor
,
133 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor
)
136 // Check if this is nonpaged pool
138 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
141 // Use the queued spin lock
143 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock
);
148 // Use the guarded mutex
150 KeAcquireGuardedMutex(Descriptor
->LockAddress
);
157 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor
,
161 // Check if this is nonpaged pool
163 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
166 // Use the queued spin lock
168 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
173 // Use the guarded mutex
175 KeReleaseGuardedMutex(Descriptor
->LockAddress
);
179 /* PUBLIC FUNCTIONS ***********************************************************/
183 ExAllocateArmPoolWithTag(IN POOL_TYPE PoolType
,
184 IN SIZE_T NumberOfBytes
,
187 PPOOL_DESCRIPTOR PoolDesc
;
188 PLIST_ENTRY ListHead
;
189 PPOOL_HEADER Entry
, NextEntry
, FragmentEntry
;
194 // Some sanity checks
197 ASSERT(Tag
!= ' GIB');
198 ASSERT(NumberOfBytes
!= 0);
201 // Get the pool type and its corresponding vector for this request
203 PoolType
= PoolType
& BASE_POOL_TYPE_MASK
;
204 PoolDesc
= PoolVector
[PoolType
];
205 ASSERT(PoolDesc
!= NULL
);
208 // Check if this is a big page allocation
210 if (NumberOfBytes
> POOL_MAX_ALLOC
)
213 // Then just return the number of pages requested
215 return MiAllocatePoolPages(PoolType
, NumberOfBytes
);
219 // Should never request 0 bytes from the pool, but since so many drivers do
220 // it, we'll just assume they want 1 byte, based on NT's similar behavior
222 if (!NumberOfBytes
) NumberOfBytes
= 1;
225 // A pool allocation is defined by its data, a linked list to connect it to
226 // the free list (if necessary), and a pool header to store accounting info.
227 // Calculate this size, then convert it into a block size (units of pool
230 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
231 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
232 // the direct allocation of pages.
234 i
= (NumberOfBytes
+ sizeof(POOL_HEADER
) + sizeof(LIST_ENTRY
) - 1) /
238 // Loop in the free lists looking for a block if this size. Start with the
239 // list optimized for this kind of size lookup
241 ListHead
= &PoolDesc
->ListHeads
[i
];
245 // Are there any free entries available on this list?
247 if (!IsListEmpty(ListHead
))
250 // Acquire the pool lock now
252 OldIrql
= ExLockPool(PoolDesc
);
255 // And make sure the list still has entries
257 if (IsListEmpty(ListHead
))
260 // Someone raced us (and won) before we had a chance to acquire
265 ExUnlockPool(PoolDesc
, OldIrql
);
271 // Remove a free entry from the list
272 // Note that due to the way we insert free blocks into multiple lists
273 // there is a guarantee that any block on this list will either be
274 // of the correct size, or perhaps larger.
276 Entry
= (PPOOL_HEADER
)RemoveHeadList(ListHead
) - 1;
277 ASSERT(Entry
->BlockSize
>= i
);
278 ASSERT(Entry
->PoolType
== 0);
281 // Check if this block is larger that what we need. The block could
282 // not possibly be smaller, due to the reason explained above (and
283 // we would've asserted on a checked build if this was the case).
285 if (Entry
->BlockSize
!= i
)
288 // Is there an entry before this one?
290 if (Entry
->PreviousSize
== 0)
293 // There isn't anyone before us, so take the next block and
294 // turn it into a fragment that contains the leftover data
295 // that we don't need to satisfy the caller's request
297 FragmentEntry
= Entry
+ i
;
298 FragmentEntry
->BlockSize
= Entry
->BlockSize
- i
;
301 // And make it point back to us
303 FragmentEntry
->PreviousSize
= i
;
306 // Now get the block that follows the new fragment and check
307 // if it's still on the same page as us (and not at the end)
309 NextEntry
= FragmentEntry
+ FragmentEntry
->BlockSize
;
310 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
313 // Adjust this next block to point to our newly created
316 NextEntry
->PreviousSize
= FragmentEntry
->BlockSize
;
322 // There is a free entry before us, which we know is smaller
323 // so we'll make this entry the fragment instead
325 FragmentEntry
= Entry
;
328 // And then we'll remove from it the actual size required.
329 // Now the entry is a leftover free fragment
331 Entry
->BlockSize
-= i
;
334 // Now let's go to the next entry after the fragment (which
335 // used to point to our original free entry) and make it
336 // reference the new fragment entry instead.
338 // This is the entry that will actually end up holding the
341 Entry
+= Entry
->BlockSize
;
342 Entry
->PreviousSize
= FragmentEntry
->BlockSize
;
345 // And now let's go to the entry after that one and check if
346 // it's still on the same page, and not at the end
348 NextEntry
= Entry
+ i
;
349 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
352 // Make it reference the allocation entry
354 NextEntry
->PreviousSize
= i
;
359 // Now our (allocation) entry is the right size
361 Entry
->BlockSize
= i
;
364 // And the next entry is now the free fragment which contains
365 // the remaining difference between how big the original entry
366 // was, and the actual size the caller needs/requested.
368 FragmentEntry
->PoolType
= 0;
369 BlockSize
= FragmentEntry
->BlockSize
;
372 // Now check if enough free bytes remained for us to have a
373 // "full" entry, which contains enough bytes for a linked list
374 // and thus can be used for allocations (up to 8 bytes...)
379 // Insert the free entry into the free list for this size
381 InsertTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
382 (PLIST_ENTRY
)FragmentEntry
+ 1);
387 // We have found an entry for this allocation, so set the pool type
388 // and release the lock since we're done
390 Entry
->PoolType
= PoolType
+ 1;
391 ExUnlockPool(PoolDesc
, OldIrql
);
394 // Return the pool allocation
396 Entry
->PoolTag
= Tag
;
399 } while (++ListHead
!= &PoolDesc
->ListHeads
[POOL_LISTS_PER_PAGE
]);
402 // There were no free entries left, so we have to allocate a new fresh page
404 Entry
= MiAllocatePoolPages(PoolType
, PAGE_SIZE
);
406 Entry
->BlockSize
= i
;
407 Entry
->PoolType
= PoolType
+ 1;
410 // This page will have two entries -- one for the allocation (which we just
411 // created above), and one for the remaining free bytes, which we're about
412 // to create now. The free bytes are the whole page minus what was allocated
413 // and then converted into units of block headers.
415 BlockSize
= (PAGE_SIZE
/ sizeof(POOL_HEADER
)) - i
;
416 FragmentEntry
= Entry
+ i
;
417 FragmentEntry
->Ulong1
= 0;
418 FragmentEntry
->BlockSize
= BlockSize
;
419 FragmentEntry
->PreviousSize
= i
;
422 // Now check if enough free bytes remained for us to have a "full" entry,
423 // which contains enough bytes for a linked list and thus can be used for
424 // allocations (up to 8 bytes...)
426 if (FragmentEntry
->BlockSize
!= 1)
429 // Excellent -- acquire the pool lock
431 OldIrql
= ExLockPool(PoolDesc
);
434 // And insert the free entry into the free list for this block size
436 InsertTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
437 (PLIST_ENTRY
)FragmentEntry
+ 1);
440 // Release the pool lock
442 ExUnlockPool(PoolDesc
, OldIrql
);
446 // And return the pool allocation
448 Entry
->PoolTag
= Tag
;
454 ExAllocateArmPool(POOL_TYPE PoolType
,
455 SIZE_T NumberOfBytes
)
458 // Use a default tag of "None"
460 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, 'enoN');
465 ExFreeArmPoolWithTag(IN PVOID P
,
468 PPOOL_HEADER Entry
, NextEntry
;
472 PPOOL_DESCRIPTOR PoolDesc
;
473 BOOLEAN Combined
= FALSE
;
476 // Quickly deal with big page allocations
478 if (PAGE_ALIGN(P
) == P
)
485 // Get the entry for this pool allocation
486 // The pointer math here may look wrong or confusing, but it is quite right
492 // Get the size of the entry, and it's pool type, then load the descriptor
493 // for this pool type
495 BlockSize
= Entry
->BlockSize
;
496 PoolType
= (Entry
->PoolType
& 3) - 1;
497 PoolDesc
= PoolVector
[PoolType
];
500 // Get the pointer to the next entry
502 NextEntry
= Entry
+ BlockSize
;
505 // Acquire the pool lock
507 OldIrql
= ExLockPool(PoolDesc
);
510 // Check if the next allocation is at the end of the page
512 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
515 // We may be able to combine the block if it's free
517 if (NextEntry
->PoolType
== 0)
520 // The next block is free, so we'll do a combine
525 // Make sure there's actual data in the block -- anything smaller
526 // than this means we only have the header, so there's no linked list
529 if ((NextEntry
->BlockSize
!= 1))
532 // The block is at least big enough to have a linked list, so go
533 // ahead and remove it
535 RemoveEntryList((PLIST_ENTRY
)NextEntry
+ 1);
539 // Our entry is now combined with the next entry
541 Entry
->BlockSize
= Entry
->BlockSize
+ NextEntry
->BlockSize
;
546 // Now check if there was a previous entry on the same page as us
548 if (Entry
->PreviousSize
)
551 // Great, grab that entry and check if it's free
553 NextEntry
= Entry
- Entry
->PreviousSize
;
554 if (NextEntry
->PoolType
== 0)
557 // It is, so we can do a combine
562 // Make sure there's actual data in the block -- anything smaller
563 // than this means we only have the header so there's no linked list
566 if ((NextEntry
->BlockSize
!= 1))
569 // The block is at least big enough to have a linked list, so go
570 // ahead and remove it
572 RemoveEntryList((PLIST_ENTRY
)NextEntry
+ 1);
576 // Combine our original block (which might've already been combined
577 // with the next block), into the previous block
579 NextEntry
->BlockSize
= NextEntry
->BlockSize
+ Entry
->BlockSize
;
582 // And now we'll work with the previous block instead
589 // By now, it may have been possible for our combined blocks to actually
590 // have made up a full page (if there were only 2-3 allocations on the
591 // page, they could've all been combined).
593 if ((PAGE_ALIGN(Entry
) == Entry
) &&
594 (PAGE_ALIGN(Entry
+ Entry
->BlockSize
) == Entry
+ Entry
->BlockSize
))
597 // In this case, release the pool lock, and free the page
599 ExUnlockPool(PoolDesc
, OldIrql
);
600 MiFreePoolPages(Entry
);
605 // Otherwise, we now have a free block (or a combination of 2 or 3)
608 BlockSize
= Entry
->BlockSize
;
609 ASSERT(BlockSize
!= 1);
612 // Check if we actually did combine it with anyone
617 // Get the first combined block (either our original to begin with, or
618 // the one after the original, depending if we combined with the previous)
620 NextEntry
= Entry
+ BlockSize
;
623 // As long as the next block isn't on a page boundary, have it point
626 if (PAGE_ALIGN(NextEntry
) != NextEntry
) NextEntry
->PreviousSize
= BlockSize
;
630 // Insert this new free block, and release the pool lock
632 InsertHeadList(&PoolDesc
->ListHeads
[BlockSize
- 1], (PLIST_ENTRY
)Entry
+ 1);
633 ExUnlockPool(PoolDesc
, OldIrql
);
638 ExFreeArmPool(PVOID P
)
641 // Just free without checking for the tag
643 ExFreeArmPoolWithTag(P
, 0);
//
// NOTE(review): the body of this routine (original lines 653+) lies outside
// this chunk; only the signature is visible here. Presumably it returns the
// usable size of PoolBlock and reports via *QuotaCharged whether quota was
// charged for it -- confirm against the full file before relying on this.
//
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
667 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType
,
668 IN SIZE_T NumberOfBytes
)
673 return ExAllocatePoolWithQuotaTag(PoolType
, NumberOfBytes
, 'enoN');
681 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType
,
682 IN SIZE_T NumberOfBytes
,
684 IN EX_POOL_PRIORITY Priority
)
690 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);
698 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType
,
699 IN SIZE_T NumberOfBytes
,
706 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);