/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
/* INCLUDES *******************************************************************/
#line 15 "ARM³::EXPOOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag
/* GLOBALS ********************************************************************/
24 POOL_DESCRIPTOR NonPagedPoolDescriptor
;
25 PPOOL_DESCRIPTOR PoolVector
[2];
26 PKGUARDED_MUTEX ExpPagedPoolMutex
;
/* PRIVATE FUNCTIONS **********************************************************/
32 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor
,
33 IN POOL_TYPE PoolType
,
38 PLIST_ENTRY NextEntry
, LastEntry
;
41 // Setup the descriptor based on the caller's request
43 PoolDescriptor
->PoolType
= PoolType
;
44 PoolDescriptor
->PoolIndex
= PoolIndex
;
45 PoolDescriptor
->Threshold
= Threshold
;
46 PoolDescriptor
->LockAddress
= PoolLock
;
49 // Initialize accounting data
51 PoolDescriptor
->RunningAllocs
= 0;
52 PoolDescriptor
->RunningDeAllocs
= 0;
53 PoolDescriptor
->TotalPages
= 0;
54 PoolDescriptor
->TotalBytes
= 0;
55 PoolDescriptor
->TotalBigPages
= 0;
58 // Nothing pending for now
60 PoolDescriptor
->PendingFrees
= NULL
;
61 PoolDescriptor
->PendingFreeDepth
= 0;
64 // Loop all the descriptor's allocation lists and initialize them
66 NextEntry
= PoolDescriptor
->ListHeads
;
67 LastEntry
= NextEntry
+ POOL_LISTS_PER_PAGE
;
68 while (NextEntry
< LastEntry
) InitializeListHead(NextEntry
++);
73 InitializePool(IN POOL_TYPE PoolType
,
76 PPOOL_DESCRIPTOR Descriptor
;
79 // Check what kind of pool this is
81 if (PoolType
== NonPagedPool
)
84 // Initialize the nonpaged pool descriptor
86 PoolVector
[NonPagedPool
] = &NonPagedPoolDescriptor
;
87 ExInitializePoolDescriptor(PoolVector
[NonPagedPool
],
96 // Allocate the pool descriptor
98 Descriptor
= ExAllocatePoolWithTag(NonPagedPool
,
99 sizeof(KGUARDED_MUTEX
) +
100 sizeof(POOL_DESCRIPTOR
),
105 // This is really bad...
107 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
115 // Setup the vector and guarded mutex for paged pool
117 PoolVector
[PagedPool
] = Descriptor
;
118 ExpPagedPoolMutex
= (PKGUARDED_MUTEX
)(Descriptor
+ 1);
119 KeInitializeGuardedMutex(ExpPagedPoolMutex
);
120 ExInitializePoolDescriptor(Descriptor
,
130 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor
)
133 // Check if this is nonpaged pool
135 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
138 // Use the queued spin lock
140 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock
);
145 // Use the guarded mutex
147 KeAcquireGuardedMutex(Descriptor
->LockAddress
);
154 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor
,
158 // Check if this is nonpaged pool
160 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
163 // Use the queued spin lock
165 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
170 // Use the guarded mutex
172 KeReleaseGuardedMutex(Descriptor
->LockAddress
);
/* PUBLIC FUNCTIONS ***********************************************************/
180 ExAllocateArmPoolWithTag(IN POOL_TYPE PoolType
,
181 IN SIZE_T NumberOfBytes
,
184 PPOOL_DESCRIPTOR PoolDesc
;
185 PLIST_ENTRY ListHead
;
186 PPOOL_HEADER Entry
, NextEntry
, FragmentEntry
;
191 // Some sanity checks
194 ASSERT(Tag
!= ' GIB');
195 ASSERT(NumberOfBytes
!= 0);
198 // Get the pool type and its corresponding vector for this request
200 PoolType
= PoolType
& BASE_POOL_TYPE_MASK
;
201 PoolDesc
= PoolVector
[PoolType
];
202 ASSERT(PoolDesc
!= NULL
);
205 // Check if this is a big page allocation
207 if (NumberOfBytes
> POOL_MAX_ALLOC
)
210 // Then just return the number of pages requested
212 return MiAllocatePoolPages(PoolType
, NumberOfBytes
);
216 // Should never request 0 bytes from the pool, but since so many drivers do
217 // it, we'll just assume they want 1 byte, based on NT's similar behavior
219 if (!NumberOfBytes
) NumberOfBytes
= 1;
222 // A pool allocation is defined by its data, a linked list to connect it to
223 // the free list (if necessary), and a pool header to store accounting info.
224 // Calculate this size, then convert it into a block size (units of pool
227 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
228 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
229 // the direct allocation of pages.
231 i
= (NumberOfBytes
+ sizeof(POOL_HEADER
) + sizeof(LIST_ENTRY
) - 1) /
235 // Loop in the free lists looking for a block if this size. Start with the
236 // list optimized for this kind of size lookup
238 ListHead
= &PoolDesc
->ListHeads
[i
];
242 // Are there any free entries available on this list?
244 if (!IsListEmpty(ListHead
))
247 // Acquire the pool lock now
249 OldIrql
= ExLockPool(PoolDesc
);
252 // And make sure the list still has entries
254 if (IsListEmpty(ListHead
))
257 // Someone raced us (and won) before we had a chance to acquire
262 ExUnlockPool(PoolDesc
, OldIrql
);
268 // Remove a free entry from the list
269 // Note that due to the way we insert free blocks into multiple lists
270 // there is a guarantee that any block on this list will either be
271 // of the correct size, or perhaps larger.
273 Entry
= (PPOOL_HEADER
)RemoveHeadList(ListHead
) - 1;
274 ASSERT(Entry
->BlockSize
>= i
);
275 ASSERT(Entry
->PoolType
== 0);
278 // Check if this block is larger that what we need. The block could
279 // not possibly be smaller, due to the reason explained above (and
280 // we would've asserted on a checked build if this was the case).
282 if (Entry
->BlockSize
!= i
)
285 // Is there an entry before this one?
287 if (Entry
->PreviousSize
== 0)
290 // There isn't anyone before us, so take the next block and
291 // turn it into a fragment that contains the leftover data
292 // that we don't need to satisfy the caller's request
294 FragmentEntry
= Entry
+ i
;
295 FragmentEntry
->BlockSize
= Entry
->BlockSize
- i
;
298 // And make it point back to us
300 FragmentEntry
->PreviousSize
= i
;
303 // Now get the block that follows the new fragment and check
304 // if it's still on the same page as us (and not at the end)
306 NextEntry
= FragmentEntry
+ FragmentEntry
->BlockSize
;
307 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
310 // Adjust this next block to point to our newly created
313 NextEntry
->PreviousSize
= FragmentEntry
->BlockSize
;
319 // There is a free entry before us, which we know is smaller
320 // so we'll make this entry the fragment instead
322 FragmentEntry
= Entry
;
325 // And then we'll remove from it the actual size required.
326 // Now the entry is a leftover free fragment
328 Entry
->BlockSize
-= i
;
331 // Now let's go to the next entry after the fragment (which
332 // used to point to our original free entry) and make it
333 // reference the new fragment entry instead.
335 // This is the entry that will actually end up holding the
338 Entry
+= Entry
->BlockSize
;
339 Entry
->PreviousSize
= FragmentEntry
->BlockSize
;
342 // And now let's go to the entry after that one and check if
343 // it's still on the same page, and not at the end
345 NextEntry
= Entry
+ i
;
346 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
349 // Make it reference the allocation entry
351 NextEntry
->PreviousSize
= i
;
356 // Now our (allocation) entry is the right size
358 Entry
->BlockSize
= i
;
361 // And the next entry is now the free fragment which contains
362 // the remaining difference between how big the original entry
363 // was, and the actual size the caller needs/requested.
365 FragmentEntry
->PoolType
= 0;
366 BlockSize
= FragmentEntry
->BlockSize
;
369 // Now check if enough free bytes remained for us to have a
370 // "full" entry, which contains enough bytes for a linked list
371 // and thus can be used for allocations (up to 8 bytes...)
376 // Insert the free entry into the free list for this size
378 InsertTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
379 (PLIST_ENTRY
)FragmentEntry
+ 1);
384 // We have found an entry for this allocation, so set the pool type
385 // and release the lock since we're done
387 Entry
->PoolType
= PoolType
+ 1;
388 ExUnlockPool(PoolDesc
, OldIrql
);
391 // Return the pool allocation
393 Entry
->PoolTag
= Tag
;
396 } while (++ListHead
!= &PoolDesc
->ListHeads
[POOL_LISTS_PER_PAGE
]);
399 // There were no free entries left, so we have to allocate a new fresh page
401 Entry
= MiAllocatePoolPages(PoolType
, PAGE_SIZE
);
403 Entry
->BlockSize
= i
;
404 Entry
->PoolType
= PoolType
+ 1;
407 // This page will have two entries -- one for the allocation (which we just
408 // created above), and one for the remaining free bytes, which we're about
409 // to create now. The free bytes are the whole page minus what was allocated
410 // and then converted into units of block headers.
412 BlockSize
= (PAGE_SIZE
/ sizeof(POOL_HEADER
)) - i
;
413 FragmentEntry
= Entry
+ i
;
414 FragmentEntry
->Ulong1
= 0;
415 FragmentEntry
->BlockSize
= BlockSize
;
416 FragmentEntry
->PreviousSize
= i
;
419 // Now check if enough free bytes remained for us to have a "full" entry,
420 // which contains enough bytes for a linked list and thus can be used for
421 // allocations (up to 8 bytes...)
423 if (FragmentEntry
->BlockSize
!= 1)
426 // Excellent -- acquire the pool lock
428 OldIrql
= ExLockPool(PoolDesc
);
431 // And insert the free entry into the free list for this block size
433 InsertTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
434 (PLIST_ENTRY
)FragmentEntry
+ 1);
437 // Release the pool lock
439 ExUnlockPool(PoolDesc
, OldIrql
);
443 // And return the pool allocation
445 Entry
->PoolTag
= Tag
;
451 ExAllocateArmPool(POOL_TYPE PoolType
,
452 SIZE_T NumberOfBytes
)
455 // Use a default tag of "None"
457 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, 'enoN');
462 ExFreeArmPoolWithTag(IN PVOID P
,
465 PPOOL_HEADER Entry
, NextEntry
;
469 PPOOL_DESCRIPTOR PoolDesc
;
470 BOOLEAN Combined
= FALSE
;
473 // Quickly deal with big page allocations
475 if (PAGE_ALIGN(P
) == P
)
482 // Get the entry for this pool allocation
483 // The pointer math here may look wrong or confusing, but it is quite right
489 // Get the size of the entry, and it's pool type, then load the descriptor
490 // for this pool type
492 BlockSize
= Entry
->BlockSize
;
493 PoolType
= (Entry
->PoolType
& 3) - 1;
494 PoolDesc
= PoolVector
[PoolType
];
497 // Get the pointer to the next entry
499 NextEntry
= Entry
+ BlockSize
;
502 // Acquire the pool lock
504 OldIrql
= ExLockPool(PoolDesc
);
507 // Check if the next allocation is at the end of the page
509 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
512 // We may be able to combine the block if it's free
514 if (NextEntry
->PoolType
== 0)
517 // The next block is free, so we'll do a combine
522 // Make sure there's actual data in the block -- anything smaller
523 // than this means we only have the header, so there's no linked list
526 if ((NextEntry
->BlockSize
!= 1))
529 // The block is at least big enough to have a linked list, so go
530 // ahead and remove it
532 RemoveEntryList((PLIST_ENTRY
)NextEntry
+ 1);
536 // Our entry is now combined with the next entry
538 Entry
->BlockSize
= Entry
->BlockSize
+ NextEntry
->BlockSize
;
543 // Now check if there was a previous entry on the same page as us
545 if (Entry
->PreviousSize
)
548 // Great, grab that entry and check if it's free
550 NextEntry
= Entry
- Entry
->PreviousSize
;
551 if (NextEntry
->PoolType
== 0)
554 // It is, so we can do a combine
559 // Make sure there's actual data in the block -- anything smaller
560 // than this means we only have the header so there's no linked list
563 if ((NextEntry
->BlockSize
!= 1))
566 // The block is at least big enough to have a linked list, so go
567 // ahead and remove it
569 RemoveEntryList((PLIST_ENTRY
)NextEntry
+ 1);
573 // Combine our original block (which might've already been combined
574 // with the next block), into the previous block
576 NextEntry
->BlockSize
= NextEntry
->BlockSize
+ Entry
->BlockSize
;
579 // And now we'll work with the previous block instead
586 // By now, it may have been possible for our combined blocks to actually
587 // have made up a full page (if there were only 2-3 allocations on the
588 // page, they could've all been combined).
590 if ((PAGE_ALIGN(Entry
) == Entry
) &&
591 (PAGE_ALIGN(Entry
+ Entry
->BlockSize
) == Entry
+ Entry
->BlockSize
))
594 // In this case, release the pool lock, and free the page
596 ExUnlockPool(PoolDesc
, OldIrql
);
597 MiFreePoolPages(Entry
);
602 // Otherwise, we now have a free block (or a combination of 2 or 3)
605 BlockSize
= Entry
->BlockSize
;
606 ASSERT(BlockSize
!= 1);
609 // Check if we actually did combine it with anyone
614 // Get the first combined block (either our original to begin with, or
615 // the one after the original, depending if we combined with the previous)
617 NextEntry
= Entry
+ BlockSize
;
620 // As long as the next block isn't on a page boundary, have it point
623 if (PAGE_ALIGN(NextEntry
) != NextEntry
) NextEntry
->PreviousSize
= BlockSize
;
627 // Insert this new free block, and release the pool lock
629 InsertHeadList(&PoolDesc
->ListHeads
[BlockSize
- 1], (PLIST_ENTRY
)Entry
+ 1);
630 ExUnlockPool(PoolDesc
, OldIrql
);
635 ExFreeArmPool(PVOID P
)
638 // Just free without checking for the tag
640 ExFreeArmPoolWithTag(P
, 0);
648 ExQueryPoolBlockSize(IN PVOID PoolBlock
,
649 OUT PBOOLEAN QuotaCharged
)
664 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType
,
665 IN SIZE_T NumberOfBytes
)
670 return ExAllocatePoolWithQuotaTag(PoolType
, NumberOfBytes
, 'enoN');
678 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType
,
679 IN SIZE_T NumberOfBytes
,
681 IN EX_POOL_PRIORITY Priority
)
687 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);
695 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType
,
696 IN SIZE_T NumberOfBytes
,
703 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);