/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
/* INCLUDES *******************************************************************/

#line 15 "ARM³::POOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
19 /* GLOBALS ********************************************************************/
21 LIST_ENTRY MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
];
22 PFN_NUMBER MmNumberOfFreeNonPagedPool
, MiExpansionPoolPagesInitialCharge
;
23 PVOID MmNonPagedPoolEnd0
;
24 PFN_NUMBER MiStartOfInitialPoolFrame
, MiEndOfInitialPoolFrame
;
25 KGUARDED_MUTEX MmPagedPoolMutex
;
26 MM_PAGED_POOL_INFO MmPagedPoolInfo
;
27 SIZE_T MmAllocatedNonPagedPool
;
28 ULONG MmSpecialPoolTag
;
/* PRIVATE FUNCTIONS **********************************************************/
34 MiInitializeArmPool(VOID
)
38 PMMFREE_POOL_ENTRY FreeEntry
, FirstEntry
;
43 // We keep 4 lists of free pages (4 lists help avoid contention)
45 for (i
= 0; i
< MI_MAX_FREE_PAGE_LISTS
; i
++)
48 // Initialize each of them
50 InitializeListHead(&MmNonPagedPoolFreeListHead
[i
]);
54 // Calculate how many pages the initial nonpaged pool has
56 PoolPages
= BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes
);
57 MmNumberOfFreeNonPagedPool
= PoolPages
;
60 // Initialize the first free entry
62 FreeEntry
= MmNonPagedPoolStart
;
63 FirstEntry
= FreeEntry
;
64 FreeEntry
->Size
= PoolPages
;
65 FreeEntry
->Owner
= FirstEntry
;
68 // Insert it into the last list
70 InsertHeadList(&MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
- 1],
74 // Now create free entries for every single other page
76 while (PoolPages
-- > 1)
79 // Link them all back to the original entry
81 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)FreeEntry
+ PAGE_SIZE
);
82 FreeEntry
->Owner
= FirstEntry
;
86 // Validate and remember first allocated pool page
88 PointerPte
= MiAddressToPte(MmNonPagedPoolStart
);
89 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
90 MiStartOfInitialPoolFrame
= PFN_FROM_PTE(PointerPte
);
93 // Keep track of where initial nonpaged pool ends
95 MmNonPagedPoolEnd0
= (PVOID
)((ULONG_PTR
)MmNonPagedPoolStart
+
96 MmSizeOfNonPagedPoolInBytes
);
99 // Validate and remember last allocated pool page
101 PointerPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)MmNonPagedPoolEnd0
- 1));
102 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
103 MiEndOfInitialPoolFrame
= PFN_FROM_PTE(PointerPte
);
106 // Validate the first nonpaged pool expansion page (which is a guard page)
108 PointerPte
= MiAddressToPte(MmNonPagedPoolExpansionStart
);
109 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
112 // Calculate the size of the expansion region alone
114 MiExpansionPoolPagesInitialCharge
=
115 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes
- MmSizeOfNonPagedPoolInBytes
);
118 // Remove 2 pages, since there's a guard page on top and on the bottom
120 MiExpansionPoolPagesInitialCharge
-= 2;
123 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
124 // guard page on top so make sure to skip it. The bottom guard page will be
125 // guaranteed by the fact our size is off by one.
127 MiInitializeSystemPtes(PointerPte
+ 1,
128 MiExpansionPoolPagesInitialCharge
,
129 NonPagedPoolExpansion
);
134 MiAllocatePoolPages(IN POOL_TYPE PoolType
,
135 IN SIZE_T SizeInBytes
)
137 PFN_NUMBER SizeInPages
, PageFrameNumber
;
140 PLIST_ENTRY NextEntry
, NextHead
, LastHead
;
141 PMMPTE PointerPte
, StartPte
;
144 PVOID BaseVa
, BaseVaStart
;
145 PMMFREE_POOL_ENTRY FreeEntry
;
146 PKSPIN_LOCK_QUEUE LockQueue
;
149 // Figure out how big the allocation is in pages
151 SizeInPages
= BYTES_TO_PAGES(SizeInBytes
);
156 if (PoolType
== PagedPool
)
159 // Lock the paged pool mutex
161 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
164 // Find some empty allocation space
166 i
= RtlFindClearBitsAndSet(MmPagedPoolInfo
.PagedPoolAllocationMap
,
168 MmPagedPoolInfo
.PagedPoolHint
);
172 // Get the page bit count
174 i
= ((SizeInPages
- 1) / 1024) + 1;
177 // Check if there is enougn paged pool expansion space left
179 if (MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
>
180 MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
))
185 DPRINT1("OUT OF PAGED POOL!!!\n");
186 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
191 // Check if we'll have to expand past the last PTE we have available
193 if (((i
- 1) + MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
) >
194 MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
))
197 // We can only support this much then
199 SizeInPages
= MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
) -
200 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
+
202 ASSERT(SizeInPages
< i
);
208 // Otherwise, there is plenty of space left for this expansion
214 // Get the template PTE we'll use to expand
216 TempPte
= ValidKernelPte
;
219 // Get the first PTE in expansion space
221 PointerPte
= MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
;
222 BaseVa
= MiPteToAddress(PointerPte
);
223 BaseVaStart
= BaseVa
;
226 // Lock the PFN database and loop pages
228 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
232 // It should not already be valid
234 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
237 // Request a paged pool page and write the PFN for it
239 PageFrameNumber
= MmAllocPage(MC_PPOOL
, 0);
240 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
243 // Save it into our double-buffered system page directory
245 MmSystemPagePtes
[(ULONG_PTR
)PointerPte
& (PAGE_SIZE
- 1) /
246 sizeof(MMPTE
)] = TempPte
;
249 // Write the actual PTE now
251 *PointerPte
++ = TempPte
;
254 // Move on to the next expansion address
256 BaseVa
= (PVOID
)((ULONG_PTR
)BaseVa
+ PAGE_SIZE
);
260 // Release the PFN database lock
262 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
265 // These pages are now available, clear their availablity bits
267 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
,
268 (MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
-
269 MiAddressToPte(MmPagedPoolInfo
.FirstPteForPagedPool
)) *
274 // Update the next expansion location
276 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
+= SizeInPages
;
279 // Zero out the newly available memory
281 RtlZeroMemory(BaseVaStart
, SizeInPages
* PAGE_SIZE
);
284 // Now try consuming the pages again
286 SizeInPages
= BYTES_TO_PAGES(SizeInBytes
);
287 i
= RtlFindClearBitsAndSet(MmPagedPoolInfo
.PagedPoolAllocationMap
,
295 DPRINT1("OUT OF PAGED POOL!!!\n");
296 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
302 // Update the pool hint if the request was just one page
304 if (SizeInPages
== 1) MmPagedPoolInfo
.PagedPoolHint
= i
+ 1;
307 // Update the end bitmap so we know the bounds of this allocation when
308 // the time comes to free it
310 RtlSetBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, i
+ SizeInPages
- 1);
313 // Now we can release the lock (it mainly protects the bitmap)
315 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
318 // Now figure out where this allocation starts
320 BaseVa
= (PVOID
)((ULONG_PTR
)MmPagedPoolStart
+ (i
<< PAGE_SHIFT
));
325 KeFlushEntireTb(TRUE
, TRUE
);
328 // Setup a demand-zero writable PTE
331 MI_MAKE_WRITE_PAGE(&TempPte
);
334 // Find the first and last PTE, then loop them all
336 PointerPte
= MiAddressToPte(BaseVa
);
337 StartPte
= PointerPte
+ SizeInPages
;
341 // Write the demand zero PTE and keep going
343 *PointerPte
++ = TempPte
;
344 } while (PointerPte
< StartPte
);
347 // Return the allocation address to the caller
353 // Allocations of less than 4 pages go into their individual buckets
356 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
359 // Loop through all the free page lists based on the page index
361 NextHead
= &MmNonPagedPoolFreeListHead
[i
];
362 LastHead
= &MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
];
365 // Acquire the nonpaged pool lock
367 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
371 // Now loop through all the free page entries in this given list
373 NextEntry
= NextHead
->Flink
;
374 while (NextEntry
!= NextHead
)
377 // Grab the entry and see if it can handle our allocation
379 FreeEntry
= CONTAINING_RECORD(NextEntry
, MMFREE_POOL_ENTRY
, List
);
380 if (FreeEntry
->Size
>= SizeInPages
)
383 // It does, so consume the pages from here
385 FreeEntry
->Size
-= SizeInPages
;
388 // The allocation will begin in this free page area
390 BaseVa
= (PVOID
)((ULONG_PTR
)FreeEntry
+
391 (FreeEntry
->Size
<< PAGE_SHIFT
));
394 // This is not a free page segment anymore
396 RemoveEntryList(&FreeEntry
->List
);
399 // However, check if its' still got space left
401 if (FreeEntry
->Size
!= 0)
404 // Insert it back into a different list, based on its pages
406 i
= FreeEntry
->Size
- 1;
407 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
408 InsertTailList (&MmNonPagedPoolFreeListHead
[i
],
413 // Grab the PTE for this allocation
415 PointerPte
= MiAddressToPte(BaseVa
);
416 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
419 // Grab the PFN NextEntry and index
421 Pfn1
= MiGetPfnEntry(PFN_FROM_PTE(PointerPte
));
424 // Now mark it as the beginning of an allocation
426 ASSERT(Pfn1
->u3
.e1
.StartOfAllocation
== 0);
427 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
430 // Check if the allocation is larger than one page
432 if (SizeInPages
!= 1)
435 // Navigate to the last PFN entry and PTE
437 PointerPte
+= SizeInPages
- 1;
438 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
439 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
443 // Mark this PFN as the last (might be the same as the first)
445 ASSERT(Pfn1
->u3
.e1
.EndOfAllocation
== 0);
446 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
449 // Release the nonpaged pool lock, and return the allocation
451 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
456 // Try the next free page entry
458 NextEntry
= FreeEntry
->List
.Flink
;
460 } while (++NextHead
< LastHead
);
463 // If we got here, we're out of space.
464 // Start by releasing the lock
466 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
469 // Allocate some system PTEs
471 StartPte
= MiReserveSystemPtes(SizeInPages
, NonPagedPoolExpansion
);
472 PointerPte
= StartPte
;
473 if (StartPte
== NULL
)
478 DPRINT1("Out of NP Expansion Pool\n");
483 // Acquire the pool lock now
485 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
488 // Lock the PFN database too
490 LockQueue
= &KeGetCurrentPrcb()->LockQueue
[LockQueuePfnLock
];
491 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue
);
496 TempPte
= ValidKernelPte
;
502 PageFrameNumber
= MmAllocPage(MC_NPPOOL
, 0);
505 // Get the PFN entry for it
507 Pfn1
= MiGetPfnEntry(PageFrameNumber
);
510 // Write the PTE for it
512 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
513 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
514 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
515 *PointerPte
++ = TempPte
;
516 } while (--SizeInPages
> 0);
519 // This is the last page
521 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
524 // Get the first page and mark it as such
526 Pfn1
= MiGetPfnEntry(StartPte
->u
.Hard
.PageFrameNumber
);
527 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
530 // Release the PFN and nonpaged pool lock
532 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue
);
533 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
536 // Return the address
538 return MiPteToAddress(StartPte
);
543 MiFreePoolPages(IN PVOID StartingVa
)
545 PMMPTE PointerPte
, StartPte
;
546 PMMPFN Pfn1
, StartPfn
;
547 PFN_NUMBER FreePages
, NumberOfPages
;
549 PMMFREE_POOL_ENTRY FreeEntry
, NextEntry
, LastEntry
;
555 if ((StartingVa
>= MmPagedPoolStart
) && (StartingVa
<= MmPagedPoolEnd
))
558 // Calculate the offset from the beginning of paged pool, and convert it
561 i
= ((ULONG_PTR
)StartingVa
- (ULONG_PTR
)MmPagedPoolStart
) >> PAGE_SHIFT
;
565 // Now use the end bitmap to scan until we find a set bit, meaning that
566 // this allocation finishes here
568 while (!RtlTestBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, End
)) End
++;
571 // Now calculate the total number of pages this allocation spans
573 NumberOfPages
= End
- i
+ 1;
576 // Acquire the paged pool lock
578 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
581 // Clear the allocation and free bits
583 RtlClearBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, i
);
584 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, i
, NumberOfPages
);
587 // Update the hint if we need to
589 if (i
< MmPagedPoolInfo
.PagedPoolHint
) MmPagedPoolInfo
.PagedPoolHint
= i
;
592 // Release the lock protecting the bitmaps
594 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
597 // And finally return the number of pages freed
599 return NumberOfPages
;
603 // Get the first PTE and its corresponding PFN entry
605 StartPte
= PointerPte
= MiAddressToPte(StartingVa
);
606 StartPfn
= Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
609 // Loop until we find the last PTE
611 while (Pfn1
->u3
.e1
.EndOfAllocation
== 0)
617 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
621 // Now we know how many pages we have
623 NumberOfPages
= PointerPte
- StartPte
+ 1;
626 // Acquire the nonpaged pool lock
628 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
631 // Mark the first and last PTEs as not part of an allocation anymore
633 StartPfn
->u3
.e1
.StartOfAllocation
= 0;
634 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
637 // Assume we will free as many pages as the allocation was
639 FreePages
= NumberOfPages
;
642 // Peek one page past the end of the allocation
647 // Guard against going past initial nonpaged pool
649 if (MiGetPfnEntryIndex(Pfn1
) == MiEndOfInitialPoolFrame
)
652 // This page is on the outskirts of initial nonpaged pool, so ignore it
659 // Otherwise, our entire allocation must've fit within the initial non
660 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
661 // the next allocation
663 ASSERT((ULONG_PTR
)StartingVa
+ NumberOfPages
<= (ULONG_PTR
)MmNonPagedPoolEnd
);
664 if (PointerPte
->u
.Hard
.Valid
== 1)
667 // It's either expansion or initial: get the PFN entry
669 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
674 // This means we've reached the guard page that protects the end of
675 // the expansion nonpaged pool
683 // Check if this allocation actually exists
685 if ((Pfn1
) && (Pfn1
->u3
.e1
.StartOfAllocation
== 0))
688 // It doesn't, so we should actually locate a free entry descriptor
690 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)StartingVa
+
691 (NumberOfPages
<< PAGE_SHIFT
));
692 ASSERT(FreeEntry
->Owner
== FreeEntry
);
695 // Consume this entry's pages, and remove it from its free list
697 FreePages
+= FreeEntry
->Size
;
698 RemoveEntryList (&FreeEntry
->List
);
702 // Now get the official free entry we'll create for the caller's allocation
704 FreeEntry
= StartingVa
;
707 // Check if the our allocation is the very first page
709 if (MiGetPfnEntryIndex(StartPfn
) == MiStartOfInitialPoolFrame
)
712 // Then we can't do anything or we'll risk underflowing
719 // Otherwise, get the PTE for the page right before our allocation
721 PointerPte
-= NumberOfPages
+ 1;
722 if (PointerPte
->u
.Hard
.Valid
== 1)
725 // It's either expansion or initial nonpaged pool, get the PFN entry
727 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
732 // We must've reached the guard page, so don't risk touching it
739 // Check if there is a valid PFN entry for the page before the allocation
740 // and then check if this page was actually the end of an allocation.
741 // If it wasn't, then we know for sure it's a free page
743 if ((Pfn1
) && (Pfn1
->u3
.e1
.EndOfAllocation
== 0))
746 // Get the free entry descriptor for that given page range
748 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)StartingVa
- PAGE_SIZE
);
749 FreeEntry
= FreeEntry
->Owner
;
752 // Check if the entry is small enough to be indexed on a free list
753 // If it is, we'll want to re-insert it, since we're about to
754 // collapse our pages on top of it, which will change its count
756 if (FreeEntry
->Size
< (MI_MAX_FREE_PAGE_LISTS
- 1))
759 // Remove the list from where it is now
761 RemoveEntryList(&FreeEntry
->List
);
766 FreeEntry
->Size
+= FreePages
;
769 // And now find the new appropriate list to place it in
771 i
= (ULONG
)(FreeEntry
->Size
- 1);
772 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
777 InsertTailList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
782 // Otherwise, just combine our free pages into this entry
784 FreeEntry
->Size
+= FreePages
;
789 // Check if we were unable to do any compaction, and we'll stick with this
791 if (FreeEntry
== StartingVa
)
794 // Well, now we are a free entry. At worse we just have our newly freed
795 // pages, at best we have our pages plus whatever entry came after us
797 FreeEntry
->Size
= FreePages
;
800 // Find the appropriate list we should be on
802 i
= FreeEntry
->Size
- 1;
803 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
808 InsertTailList (&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
812 // Just a sanity check
814 ASSERT(FreePages
!= 0);
817 // Get all the pages between our allocation and its end. These will all now
818 // become free page chunks.
820 NextEntry
= StartingVa
;
821 LastEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)NextEntry
+ (FreePages
<< PAGE_SHIFT
));
825 // Link back to the parent free entry, and keep going
827 NextEntry
->Owner
= FreeEntry
;
828 NextEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)NextEntry
+ PAGE_SIZE
);
829 } while (NextEntry
!= LastEntry
);
832 // We're done, release the lock and let the caller know how much we freed
834 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
835 return NumberOfPages
;
841 MiRaisePoolQuota(IN POOL_TYPE PoolType
,
842 IN ULONG CurrentMaxQuota
,
843 OUT PULONG NewMaxQuota
)
849 *NewMaxQuota
= CurrentMaxQuota
+ 65536;
/* PUBLIC FUNCTIONS ***********************************************************/
860 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes
,
872 MmFreeMappingAddress(IN PVOID BaseAddress
,