/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
9 /* INCLUDES *******************************************************************/
#line 15 "ARM³::EXPOOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
19 /* GLOBALS ********************************************************************/
21 POOL_DESCRIPTOR NonPagedPoolDescriptor
;
22 PPOOL_DESCRIPTOR PoolVector
[2];
24 /* PRIVATE FUNCTIONS **********************************************************/
28 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor
,
29 IN POOL_TYPE PoolType
,
34 PLIST_ENTRY NextEntry
, LastEntry
;
37 // Setup the descriptor based on the caller's request
39 PoolDescriptor
->PoolType
= PoolType
;
40 PoolDescriptor
->PoolIndex
= PoolIndex
;
41 PoolDescriptor
->Threshold
= Threshold
;
42 PoolDescriptor
->LockAddress
= PoolLock
;
45 // Initialize accounting data
47 PoolDescriptor
->RunningAllocs
= 0;
48 PoolDescriptor
->RunningDeAllocs
= 0;
49 PoolDescriptor
->TotalPages
= 0;
50 PoolDescriptor
->TotalBytes
= 0;
51 PoolDescriptor
->TotalBigPages
= 0;
54 // Nothing pending for now
56 PoolDescriptor
->PendingFrees
= NULL
;
57 PoolDescriptor
->PendingFreeDepth
= 0;
60 // Loop all the descriptor's allocation lists and initialize them
62 NextEntry
= PoolDescriptor
->ListHeads
;
63 LastEntry
= NextEntry
+ POOL_LISTS_PER_PAGE
;
64 while (NextEntry
< LastEntry
) InitializeListHead(NextEntry
++);
69 InitializePool(IN POOL_TYPE PoolType
,
72 ASSERT(PoolType
== NonPagedPool
);
75 // Initialize the nonpaged pool descirptor
77 PoolVector
[PoolType
] = &NonPagedPoolDescriptor
;
78 ExInitializePoolDescriptor(PoolVector
[PoolType
],
87 ExAllocateArmPoolWithTag(IN POOL_TYPE PoolType
,
88 IN SIZE_T NumberOfBytes
,
91 PPOOL_DESCRIPTOR PoolDesc
;
93 PPOOL_HEADER Entry
, NextEntry
, FragmentEntry
;
101 ASSERT(Tag
!= ' GIB');
102 ASSERT(NumberOfBytes
!= 0);
105 // Get the pool type and its corresponding vector for this request
107 PoolType
= PoolType
& BASE_POOL_TYPE_MASK
;
108 PoolDesc
= PoolVector
[PoolType
];
109 ASSERT(PoolDesc
!= NULL
);
112 // Check if this is a big page allocation
114 if (NumberOfBytes
> POOL_MAX_ALLOC
)
117 // Then just return the number of pages requested
119 return MiAllocatePoolPages(PoolType
, NumberOfBytes
);
123 // Should never request 0 bytes from the pool, but since so many drivers do
124 // it, we'll just assume they want 1 byte, based on NT's similar behavior
126 if (!NumberOfBytes
) NumberOfBytes
= 1;
129 // A pool allocation is defined by its data, a linked list to connect it to
130 // the free list (if necessary), and a pool header to store accounting info.
131 // Calculate this size, then convert it into a block size (units of pool
134 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
135 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
136 // the direct allocation of pages.
138 i
= (NumberOfBytes
+ sizeof(POOL_HEADER
) + sizeof(LIST_ENTRY
) - 1) /
142 // Loop in the free lists looking for a block if this size. Start with the
143 // list optimized for this kind of size lookup
145 ListHead
= &PoolDesc
->ListHeads
[i
];
149 // Are there any free entries available on this list?
151 if (!IsListEmpty(ListHead
))
154 // Acquire the nonpaged pool lock now
156 OldIrql
= KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock
);
159 // And make sure the list still has entries
161 if (IsListEmpty(ListHead
))
164 // Someone raced us (and won) before we had a chance to acquire
169 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
175 // Remove a free entry from the list
176 // Note that due to the way we insert free blocks into multiple lists
177 // there is a guarantee that any block on this list will either be
178 // of the correct size, or perhaps larger.
180 Entry
= (PPOOL_HEADER
)RemoveHeadList(ListHead
) - 1;
181 ASSERT(Entry
->BlockSize
>= i
);
182 ASSERT(Entry
->PoolType
== 0);
185 // Check if this block is larger that what we need. The block could
186 // not possibly be smaller, due to the reason explained above (and
187 // we would've asserted on a checked build if this was the case).
189 if (Entry
->BlockSize
!= i
)
192 // Is there an entry before this one?
194 if (Entry
->PreviousSize
== 0)
197 // There isn't anyone before us, so take the next block and
198 // turn it into a fragment that contains the leftover data
199 // that we don't need to satisfy the caller's request
201 FragmentEntry
= Entry
+ i
;
202 FragmentEntry
->BlockSize
= Entry
->BlockSize
- i
;
205 // And make it point back to us
207 FragmentEntry
->PreviousSize
= i
;
210 // Now get the block that follows the new fragment and check
211 // if it's still on the same page as us (and not at the end)
213 NextEntry
= FragmentEntry
+ FragmentEntry
->BlockSize
;
214 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
217 // Adjust this next block to point to our newly created
220 NextEntry
->PreviousSize
= FragmentEntry
->BlockSize
;
226 // There is a free entry before us, which we know is smaller
227 // so we'll make this entry the fragment instead
229 FragmentEntry
= Entry
;
232 // And then we'll remove from it the actual size required.
233 // Now the entry is a leftover free fragment
235 Entry
->BlockSize
-= i
;
238 // Now let's go to the next entry after the fragment (which
239 // used to point to our original free entry) and make it
240 // reference the new fragment entry instead.
242 // This is the entry that will actually end up holding the
245 Entry
+= Entry
->BlockSize
;
246 Entry
->PreviousSize
= FragmentEntry
->BlockSize
;
249 // And now let's go to the entry after that one and check if
250 // it's still on the same page, and not at the end
252 NextEntry
= Entry
+ i
;
253 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
256 // Make it reference the allocation entry
258 NextEntry
->PreviousSize
= i
;
263 // Now our (allocation) entry is the right size
265 Entry
->BlockSize
= i
;
268 // And the next entry is now the free fragment which contains
269 // the remaining difference between how big the original entry
270 // was, and the actual size the caller needs/requested.
272 FragmentEntry
->PoolType
= 0;
273 BlockSize
= FragmentEntry
->BlockSize
;
276 // Now check if enough free bytes remained for us to have a
277 // "full" entry, which contains enough bytes for a linked list
278 // and thus can be used for allocations (up to 8 bytes...)
283 // Insert the free entry into the free list for this size
285 InsertTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
286 (PLIST_ENTRY
)FragmentEntry
+ 1);
291 // We have found an entry for this allocation, so set the pool type
292 // and release the lock since we're done
294 Entry
->PoolType
= PoolType
+ 1;
295 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
298 // Return the pool allocation
300 Entry
->PoolTag
= Tag
;
303 } while (++ListHead
!= &PoolDesc
->ListHeads
[POOL_LISTS_PER_PAGE
]);
306 // There were no free entries left, so we have to allocate a new fresh page
308 Entry
= MiAllocatePoolPages(PoolType
, PAGE_SIZE
);
310 Entry
->BlockSize
= i
;
311 Entry
->PoolType
= PoolType
+ 1;
314 // This page will have two entries -- one for the allocation (which we just
315 // created above), and one for the remaining free bytes, which we're about
316 // to create now. The free bytes are the whole page minus what was allocated
317 // and then converted into units of block headers.
319 BlockSize
= (PAGE_SIZE
/ sizeof(POOL_HEADER
)) - i
;
320 FragmentEntry
= Entry
+ i
;
321 FragmentEntry
->Ulong1
= 0;
322 FragmentEntry
->BlockSize
= BlockSize
;
323 FragmentEntry
->PreviousSize
= i
;
326 // Now check if enough free bytes remained for us to have a "full" entry,
327 // which contains enough bytes for a linked list and thus can be used for
328 // allocations (up to 8 bytes...)
330 if (FragmentEntry
->BlockSize
!= 1)
333 // Excellent -- acquire the nonpaged pool lock
335 OldIrql
= KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock
);
338 // And insert the free entry into the free list for this block size
340 InsertTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
341 (PLIST_ENTRY
)FragmentEntry
+ 1);
344 // Release the nonpaged pool lock
346 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
350 // And return the pool allocation
352 Entry
->PoolTag
= Tag
;
358 ExAllocateArmPool(POOL_TYPE PoolType
,
359 SIZE_T NumberOfBytes
)
362 // Use a default tag of "None"
364 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, 'enoN');
369 ExFreeArmPoolWithTag(IN PVOID P
,
372 PPOOL_HEADER Entry
, NextEntry
;
376 PPOOL_DESCRIPTOR PoolDesc
;
377 BOOLEAN Combined
= FALSE
;
380 // Quickly deal with big page allocations
382 if (PAGE_ALIGN(P
) == P
) return (VOID
)MiFreePoolPages(P
);
385 // Get the entry for this pool allocation
386 // The pointer math here may look wrong or confusing, but it is quite right
392 // Get the size of the entry, and it's pool type, then load the descriptor
393 // for this pool type
395 BlockSize
= Entry
->BlockSize
;
396 PoolType
= (Entry
->PoolType
& BASE_POOL_TYPE_MASK
) - 1;
397 PoolDesc
= PoolVector
[PoolType
];
400 // Get the pointer to the next entry
402 NextEntry
= Entry
+ BlockSize
;
405 // Acquire the nonpaged pool lock
407 OldIrql
= KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock
);
410 // Check if the next allocation is at the end of the page
412 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
415 // We may be able to combine the block if it's free
417 if (NextEntry
->PoolType
== 0)
420 // The next block is free, so we'll do a combine
425 // Make sure there's actual data in the block -- anything smaller
426 // than this means we only have the header, so there's no linked list
429 if ((NextEntry
->BlockSize
!= 1))
432 // The block is at least big enough to have a linked list, so go
433 // ahead and remove it
435 RemoveEntryList((PLIST_ENTRY
)NextEntry
+ 1);
439 // Our entry is now combined with the next entry
441 Entry
->BlockSize
= Entry
->BlockSize
+ NextEntry
->BlockSize
;
446 // Now check if there was a previous entry on the same page as us
448 if (Entry
->PreviousSize
)
451 // Great, grab that entry and check if it's free
453 NextEntry
= Entry
- Entry
->PreviousSize
;
454 if (NextEntry
->PoolType
== 0)
457 // It is, so we can do a combine
462 // Make sure there's actual data in the block -- anything smaller
463 // than this means we only have the header so there's no linked list
466 if ((NextEntry
->BlockSize
!= 1))
469 // The block is at least big enough to have a linked list, so go
470 // ahead and remove it
472 RemoveEntryList((PLIST_ENTRY
)NextEntry
+ 1);
476 // Combine our original block (which might've already been combined
477 // with the next block), into the previous block
479 NextEntry
->BlockSize
= NextEntry
->BlockSize
+ Entry
->BlockSize
;
482 // And now we'll work with the previous block instead
489 // By now, it may have been possible for our combined blocks to actually
490 // have made up a full page (if there were only 2-3 allocations on the
491 // page, they could've all been combined).
493 if ((PAGE_ALIGN(Entry
) == Entry
) &&
494 (PAGE_ALIGN(Entry
+ Entry
->BlockSize
) == Entry
+ Entry
->BlockSize
))
497 // In this case, release the nonpaged pool lock, and free the page
499 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
500 return (VOID
)MiFreePoolPages(Entry
);
504 // Otherwise, we now have a free block (or a combination of 2 or 3)
507 BlockSize
= Entry
->BlockSize
;
508 ASSERT(BlockSize
!= 1);
511 // Check if we actually did combine it with anyone
516 // Get the first combined block (either our original to begin with, or
517 // the one after the original, depending if we combined with the previous)
519 NextEntry
= Entry
+ BlockSize
;
522 // As long as the next block isn't on a page boundary, have it point
525 if (PAGE_ALIGN(NextEntry
) != NextEntry
) NextEntry
->PreviousSize
= BlockSize
;
529 // Insert this new free block, and release the nonpaged pool lock
531 InsertHeadList(&PoolDesc
->ListHeads
[BlockSize
- 1], (PLIST_ENTRY
)Entry
+ 1);
532 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
537 ExFreeArmPool(PVOID P
)
540 // Just free without checking for the tag
542 ExFreeArmPoolWithTag(P
, 0);