2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/pool.c
5 * PURPOSE: ARM Memory Manager Pool Allocator
6 * PROGRAMMERS: ReactOS Portable Systems Group
9 /* INCLUDES *******************************************************************/
15 #line 15 "ARMĀ³::POOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
19 /* GLOBALS ********************************************************************/
/* Free nonpaged pool page runs, bucketed by run size in pages
   (populated by MiInitializeNonPagedPool, consumed by MiAllocatePoolPages) */
LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

/* Count of free nonpaged pool pages, and the page charge computed for the
   nonpaged pool expansion region in MiInitializeNonPagedPool */
PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;

/* End of the initial nonpaged pool:
   MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes */
PVOID MmNonPagedPoolEnd0;

/* First and last PFNs of the initial nonpaged pool; used by MiFreePoolPages
   as guards so coalescing never walks off the initial pool */
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;

/* Guards the paged pool bitmaps and expansion bookkeeping */
KGUARDED_MUTEX MmPagedPoolMutex;

/* Paged pool state: allocation/end bitmaps, hint, expansion PTEs */
MM_PAGED_POOL_INFO MmPagedPoolInfo;

/* Pages currently allocated from nonpaged pool */
SIZE_T MmAllocatedNonPagedPool;

/* NOTE(review): the three globals below are not referenced in this chunk —
   presumably consumed elsewhere in Mm; verify before documenting further */
ULONG MmSpecialPoolTag;
ULONG MmConsumedPoolPercentage;
BOOLEAN MmProtectFreedNonPagedPool;
32 /* PRIVATE FUNCTIONS **********************************************************/
36 MiInitializeNonPagedPoolThresholds(VOID
)
38 PFN_NUMBER Size
= MmMaximumNonPagedPoolInPages
;
40 /* Default low threshold of 8MB or one third of nonpaged pool */
41 MiLowNonPagedPoolThreshold
= (8 * _1MB
) >> PAGE_SHIFT
;
42 MiLowNonPagedPoolThreshold
= min(MiLowNonPagedPoolThreshold
, Size
/ 3);
44 /* Default high threshold of 20MB or 50% */
45 MiHighNonPagedPoolThreshold
= (20 * _1MB
) >> PAGE_SHIFT
;
46 MiHighNonPagedPoolThreshold
= min(MiHighNonPagedPoolThreshold
, Size
/ 2);
47 ASSERT(MiLowNonPagedPoolThreshold
< MiHighNonPagedPoolThreshold
);
52 MiInitializePoolEvents(VOID
)
55 PFN_NUMBER FreePoolInPages
;
58 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
60 /* Total size of the paged pool minus the allocated size, is free */
61 FreePoolInPages
= MmSizeOfPagedPoolInPages
- MmPagedPoolInfo
.AllocatedPagedPool
;
63 /* Check the initial state high state */
64 if (FreePoolInPages
>= MiHighPagedPoolThreshold
)
66 /* We have plenty of pool */
67 KeSetEvent(MiHighPagedPoolEvent
, 0, FALSE
);
72 KeClearEvent(MiHighPagedPoolEvent
);
75 /* Check the initial low state */
76 if (FreePoolInPages
<= MiLowPagedPoolThreshold
)
78 /* We're very low in free pool memory */
79 KeSetEvent(MiLowPagedPoolEvent
, 0, FALSE
);
84 KeClearEvent(MiLowPagedPoolEvent
);
87 /* Release the paged pool lock */
88 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
90 /* Now it's time for the nonpaged pool lock */
91 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
93 /* Free pages are the maximum minus what's been allocated */
94 FreePoolInPages
= MmMaximumNonPagedPoolInPages
- MmAllocatedNonPagedPool
;
96 /* Check if we have plenty */
97 if (FreePoolInPages
>= MiHighNonPagedPoolThreshold
)
99 /* We do, set the event */
100 KeSetEvent(MiHighNonPagedPoolEvent
, 0, FALSE
);
104 /* We don't, clear the event */
105 KeClearEvent(MiHighNonPagedPoolEvent
);
108 /* Check if we have very little */
109 if (FreePoolInPages
<= MiLowNonPagedPoolThreshold
)
111 /* We do, set the event */
112 KeSetEvent(MiLowNonPagedPoolEvent
, 0, FALSE
);
116 /* We don't, clear it */
117 KeClearEvent(MiLowNonPagedPoolEvent
);
120 /* We're done, release the nonpaged pool lock */
121 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
126 MiInitializeNonPagedPool(VOID
)
129 PFN_NUMBER PoolPages
;
130 PMMFREE_POOL_ENTRY FreeEntry
, FirstEntry
;
135 // We keep 4 lists of free pages (4 lists help avoid contention)
137 for (i
= 0; i
< MI_MAX_FREE_PAGE_LISTS
; i
++)
140 // Initialize each of them
142 InitializeListHead(&MmNonPagedPoolFreeListHead
[i
]);
146 // Calculate how many pages the initial nonpaged pool has
148 PoolPages
= BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes
);
149 MmNumberOfFreeNonPagedPool
= PoolPages
;
152 // Initialize the first free entry
154 FreeEntry
= MmNonPagedPoolStart
;
155 FirstEntry
= FreeEntry
;
156 FreeEntry
->Size
= PoolPages
;
157 FreeEntry
->Owner
= FirstEntry
;
160 // Insert it into the last list
162 InsertHeadList(&MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
- 1],
166 // Now create free entries for every single other page
168 while (PoolPages
-- > 1)
171 // Link them all back to the original entry
173 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)FreeEntry
+ PAGE_SIZE
);
174 FreeEntry
->Owner
= FirstEntry
;
178 // Validate and remember first allocated pool page
180 PointerPte
= MiAddressToPte(MmNonPagedPoolStart
);
181 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
182 MiStartOfInitialPoolFrame
= PFN_FROM_PTE(PointerPte
);
185 // Keep track of where initial nonpaged pool ends
187 MmNonPagedPoolEnd0
= (PVOID
)((ULONG_PTR
)MmNonPagedPoolStart
+
188 MmSizeOfNonPagedPoolInBytes
);
191 // Validate and remember last allocated pool page
193 PointerPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)MmNonPagedPoolEnd0
- 1));
194 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
195 MiEndOfInitialPoolFrame
= PFN_FROM_PTE(PointerPte
);
198 // Validate the first nonpaged pool expansion page (which is a guard page)
200 PointerPte
= MiAddressToPte(MmNonPagedPoolExpansionStart
);
201 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
204 // Calculate the size of the expansion region alone
206 MiExpansionPoolPagesInitialCharge
=
207 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes
- MmSizeOfNonPagedPoolInBytes
);
210 // Remove 2 pages, since there's a guard page on top and on the bottom
212 MiExpansionPoolPagesInitialCharge
-= 2;
215 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
216 // guard page on top so make sure to skip it. The bottom guard page will be
217 // guaranteed by the fact our size is off by one.
219 MiInitializeSystemPtes(PointerPte
+ 1,
220 MiExpansionPoolPagesInitialCharge
,
221 NonPagedPoolExpansion
);
226 MiAllocatePoolPages(IN POOL_TYPE PoolType
,
227 IN SIZE_T SizeInBytes
)
229 PFN_NUMBER SizeInPages
, PageFrameNumber
;
232 PLIST_ENTRY NextEntry
, NextHead
, LastHead
;
233 PMMPTE PointerPte
, StartPte
;
236 PVOID BaseVa
, BaseVaStart
;
237 PMMFREE_POOL_ENTRY FreeEntry
;
238 PKSPIN_LOCK_QUEUE LockQueue
;
241 // Figure out how big the allocation is in pages
243 SizeInPages
= BYTES_TO_PAGES(SizeInBytes
);
248 if (PoolType
== PagedPool
)
251 // Lock the paged pool mutex
253 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
256 // Find some empty allocation space
258 i
= RtlFindClearBitsAndSet(MmPagedPoolInfo
.PagedPoolAllocationMap
,
260 MmPagedPoolInfo
.PagedPoolHint
);
264 // Get the page bit count
266 i
= ((SizeInPages
- 1) / 1024) + 1;
267 DPRINT1("Paged pool expansion: %d %x\n", i
, SizeInPages
);
270 // Check if there is enougn paged pool expansion space left
272 if (MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
>
273 MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
))
278 DPRINT1("OUT OF PAGED POOL!!!\n");
279 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
284 // Check if we'll have to expand past the last PTE we have available
286 if (((i
- 1) + MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
) >
287 MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
))
290 // We can only support this much then
292 SizeInPages
= MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
) -
293 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
+
295 ASSERT(SizeInPages
< i
);
301 // Otherwise, there is plenty of space left for this expansion
307 // Get the template PTE we'll use to expand
309 TempPte
= ValidKernelPte
;
312 // Get the first PTE in expansion space
314 PointerPte
= MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
;
315 BaseVa
= MiPteToAddress(PointerPte
);
316 BaseVaStart
= BaseVa
;
319 // Lock the PFN database and loop pages
321 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
325 // It should not already be valid
327 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
330 PageFrameNumber
= MiRemoveAnyPage(0);
331 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
334 // Save it into our double-buffered system page directory
336 /* This seems to be making the assumption that one PDE is one page long */
337 C_ASSERT(PAGE_SIZE
== (PD_COUNT
* (sizeof(MMPTE
) * PDE_COUNT
)));
338 MmSystemPagePtes
[(ULONG_PTR
)PointerPte
& (PAGE_SIZE
- 1) /
339 sizeof(MMPTE
)] = TempPte
;
341 /* Initialize the PFN */
342 MiInitializePfnForOtherProcess(PageFrameNumber
,
344 MmSystemPageDirectory
[(PointerPte
- (PMMPTE
)PDE_BASE
) / PDE_COUNT
]);
346 /* Write the actual PTE now */
347 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
348 *PointerPte
++ = TempPte
;
351 // Move on to the next expansion address
353 BaseVa
= (PVOID
)((ULONG_PTR
)BaseVa
+ PAGE_SIZE
);
357 // Release the PFN database lock
359 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
362 // These pages are now available, clear their availablity bits
364 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
,
365 (MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
-
366 MiAddressToPte(MmPagedPoolInfo
.FirstPteForPagedPool
)) *
371 // Update the next expansion location
373 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
+= SizeInPages
;
376 // Zero out the newly available memory
378 RtlZeroMemory(BaseVaStart
, SizeInPages
* PAGE_SIZE
);
381 // Now try consuming the pages again
383 SizeInPages
= BYTES_TO_PAGES(SizeInBytes
);
384 i
= RtlFindClearBitsAndSet(MmPagedPoolInfo
.PagedPoolAllocationMap
,
392 DPRINT1("OUT OF PAGED POOL!!!\n");
393 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
399 // Update the pool hint if the request was just one page
401 if (SizeInPages
== 1) MmPagedPoolInfo
.PagedPoolHint
= i
+ 1;
404 // Update the end bitmap so we know the bounds of this allocation when
405 // the time comes to free it
407 RtlSetBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, i
+ SizeInPages
- 1);
410 // Now we can release the lock (it mainly protects the bitmap)
412 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
415 // Now figure out where this allocation starts
417 BaseVa
= (PVOID
)((ULONG_PTR
)MmPagedPoolStart
+ (i
<< PAGE_SHIFT
));
422 KeFlushEntireTb(TRUE
, TRUE
);
424 /* Setup a demand-zero writable PTE */
425 DPRINT1("Setting up demand zero\n");
426 MI_MAKE_SOFTWARE_PTE(&TempPte
, MM_READWRITE
);
429 // Find the first and last PTE, then loop them all
431 PointerPte
= MiAddressToPte(BaseVa
);
432 StartPte
= PointerPte
+ SizeInPages
;
436 // Write the demand zero PTE and keep going
438 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
439 *PointerPte
++ = TempPte
;
440 } while (PointerPte
< StartPte
);
443 // Return the allocation address to the caller
449 // Allocations of less than 4 pages go into their individual buckets
452 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
455 // Loop through all the free page lists based on the page index
457 NextHead
= &MmNonPagedPoolFreeListHead
[i
];
458 LastHead
= &MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
];
461 // Acquire the nonpaged pool lock
463 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
467 // Now loop through all the free page entries in this given list
469 NextEntry
= NextHead
->Flink
;
470 while (NextEntry
!= NextHead
)
473 // Grab the entry and see if it can handle our allocation
475 FreeEntry
= CONTAINING_RECORD(NextEntry
, MMFREE_POOL_ENTRY
, List
);
476 if (FreeEntry
->Size
>= SizeInPages
)
479 // It does, so consume the pages from here
481 FreeEntry
->Size
-= SizeInPages
;
484 // The allocation will begin in this free page area
486 BaseVa
= (PVOID
)((ULONG_PTR
)FreeEntry
+
487 (FreeEntry
->Size
<< PAGE_SHIFT
));
490 // This is not a free page segment anymore
492 RemoveEntryList(&FreeEntry
->List
);
495 // However, check if its' still got space left
497 if (FreeEntry
->Size
!= 0)
500 // Insert it back into a different list, based on its pages
502 i
= FreeEntry
->Size
- 1;
503 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
504 InsertTailList (&MmNonPagedPoolFreeListHead
[i
],
509 // Grab the PTE for this allocation
511 PointerPte
= MiAddressToPte(BaseVa
);
512 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
515 // Grab the PFN NextEntry and index
517 Pfn1
= MiGetPfnEntry(PFN_FROM_PTE(PointerPte
));
520 // Now mark it as the beginning of an allocation
522 ASSERT(Pfn1
->u3
.e1
.StartOfAllocation
== 0);
523 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
526 // Check if the allocation is larger than one page
528 if (SizeInPages
!= 1)
531 // Navigate to the last PFN entry and PTE
533 PointerPte
+= SizeInPages
- 1;
534 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
535 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
539 // Mark this PFN as the last (might be the same as the first)
541 ASSERT(Pfn1
->u3
.e1
.EndOfAllocation
== 0);
542 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
545 // Release the nonpaged pool lock, and return the allocation
547 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
552 // Try the next free page entry
554 NextEntry
= FreeEntry
->List
.Flink
;
556 } while (++NextHead
< LastHead
);
559 // If we got here, we're out of space.
560 // Start by releasing the lock
562 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
565 // Allocate some system PTEs
567 StartPte
= MiReserveSystemPtes(SizeInPages
, NonPagedPoolExpansion
);
568 PointerPte
= StartPte
;
569 if (StartPte
== NULL
)
574 DPRINT1("Out of NP Expansion Pool\n");
579 // Acquire the pool lock now
581 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
584 // Lock the PFN database too
586 LockQueue
= &KeGetCurrentPrcb()->LockQueue
[LockQueuePfnLock
];
587 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue
);
592 TempPte
= ValidKernelPte
;
595 /* Allocate a page */
596 PageFrameNumber
= MiRemoveAnyPage(0);
598 /* Get the PFN entry for it and fill it out */
599 Pfn1
= MiGetPfnEntry(PageFrameNumber
);
600 Pfn1
->u3
.e2
.ReferenceCount
= 1;
601 Pfn1
->u2
.ShareCount
= 1;
602 Pfn1
->PteAddress
= PointerPte
;
603 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
604 Pfn1
->u4
.VerifierAllocation
= 0;
606 /* Write the PTE for it */
607 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
608 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
609 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
610 *PointerPte
++ = TempPte
;
611 } while (--SizeInPages
> 0);
614 // This is the last page
616 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
619 // Get the first page and mark it as such
621 Pfn1
= MiGetPfnEntry(StartPte
->u
.Hard
.PageFrameNumber
);
622 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
625 // Release the PFN and nonpaged pool lock
627 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue
);
628 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
631 // Return the address
633 return MiPteToAddress(StartPte
);
638 MiFreePoolPages(IN PVOID StartingVa
)
640 PMMPTE PointerPte
, StartPte
;
641 PMMPFN Pfn1
, StartPfn
;
642 PFN_NUMBER FreePages
, NumberOfPages
;
644 PMMFREE_POOL_ENTRY FreeEntry
, NextEntry
, LastEntry
;
650 if ((StartingVa
>= MmPagedPoolStart
) && (StartingVa
<= MmPagedPoolEnd
))
653 // Calculate the offset from the beginning of paged pool, and convert it
656 i
= ((ULONG_PTR
)StartingVa
- (ULONG_PTR
)MmPagedPoolStart
) >> PAGE_SHIFT
;
660 // Now use the end bitmap to scan until we find a set bit, meaning that
661 // this allocation finishes here
663 while (!RtlTestBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, End
)) End
++;
666 // Now calculate the total number of pages this allocation spans
668 NumberOfPages
= End
- i
+ 1;
670 /* Delete the actual pages */
671 PointerPte
= MmPagedPoolInfo
.FirstPteForPagedPool
+ i
;
672 FreePages
= MiDeleteSystemPageableVm(PointerPte
, NumberOfPages
, 0, NULL
);
673 ASSERT(FreePages
== NumberOfPages
);
676 // Acquire the paged pool lock
678 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
681 // Clear the allocation and free bits
683 RtlClearBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, i
);
684 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, i
, NumberOfPages
);
687 // Update the hint if we need to
689 if (i
< MmPagedPoolInfo
.PagedPoolHint
) MmPagedPoolInfo
.PagedPoolHint
= i
;
692 // Release the lock protecting the bitmaps
694 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
697 // And finally return the number of pages freed
699 return NumberOfPages
;
703 // Get the first PTE and its corresponding PFN entry
705 StartPte
= PointerPte
= MiAddressToPte(StartingVa
);
706 StartPfn
= Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
709 // Loop until we find the last PTE
711 while (Pfn1
->u3
.e1
.EndOfAllocation
== 0)
717 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
721 // Now we know how many pages we have
723 NumberOfPages
= PointerPte
- StartPte
+ 1;
726 // Acquire the nonpaged pool lock
728 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
731 // Mark the first and last PTEs as not part of an allocation anymore
733 StartPfn
->u3
.e1
.StartOfAllocation
= 0;
734 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
737 // Assume we will free as many pages as the allocation was
739 FreePages
= NumberOfPages
;
742 // Peek one page past the end of the allocation
747 // Guard against going past initial nonpaged pool
749 if (MiGetPfnEntryIndex(Pfn1
) == MiEndOfInitialPoolFrame
)
752 // This page is on the outskirts of initial nonpaged pool, so ignore it
759 // Otherwise, our entire allocation must've fit within the initial non
760 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
761 // the next allocation
763 ASSERT((ULONG_PTR
)StartingVa
+ NumberOfPages
<= (ULONG_PTR
)MmNonPagedPoolEnd
);
764 if (PointerPte
->u
.Hard
.Valid
== 1)
767 // It's either expansion or initial: get the PFN entry
769 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
774 // This means we've reached the guard page that protects the end of
775 // the expansion nonpaged pool
783 // Check if this allocation actually exists
785 if ((Pfn1
) && (Pfn1
->u3
.e1
.StartOfAllocation
== 0))
788 // It doesn't, so we should actually locate a free entry descriptor
790 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)StartingVa
+
791 (NumberOfPages
<< PAGE_SHIFT
));
792 ASSERT(FreeEntry
->Owner
== FreeEntry
);
795 // Consume this entry's pages, and remove it from its free list
797 FreePages
+= FreeEntry
->Size
;
798 RemoveEntryList (&FreeEntry
->List
);
802 // Now get the official free entry we'll create for the caller's allocation
804 FreeEntry
= StartingVa
;
807 // Check if the our allocation is the very first page
809 if (MiGetPfnEntryIndex(StartPfn
) == MiStartOfInitialPoolFrame
)
812 // Then we can't do anything or we'll risk underflowing
819 // Otherwise, get the PTE for the page right before our allocation
821 PointerPte
-= NumberOfPages
+ 1;
822 if (PointerPte
->u
.Hard
.Valid
== 1)
825 // It's either expansion or initial nonpaged pool, get the PFN entry
827 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
832 // We must've reached the guard page, so don't risk touching it
839 // Check if there is a valid PFN entry for the page before the allocation
840 // and then check if this page was actually the end of an allocation.
841 // If it wasn't, then we know for sure it's a free page
843 if ((Pfn1
) && (Pfn1
->u3
.e1
.EndOfAllocation
== 0))
846 // Get the free entry descriptor for that given page range
848 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)StartingVa
- PAGE_SIZE
);
849 FreeEntry
= FreeEntry
->Owner
;
852 // Check if the entry is small enough to be indexed on a free list
853 // If it is, we'll want to re-insert it, since we're about to
854 // collapse our pages on top of it, which will change its count
856 if (FreeEntry
->Size
< (MI_MAX_FREE_PAGE_LISTS
- 1))
859 // Remove the list from where it is now
861 RemoveEntryList(&FreeEntry
->List
);
866 FreeEntry
->Size
+= FreePages
;
869 // And now find the new appropriate list to place it in
871 i
= (ULONG
)(FreeEntry
->Size
- 1);
872 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
877 InsertTailList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
882 // Otherwise, just combine our free pages into this entry
884 FreeEntry
->Size
+= FreePages
;
889 // Check if we were unable to do any compaction, and we'll stick with this
891 if (FreeEntry
== StartingVa
)
894 // Well, now we are a free entry. At worse we just have our newly freed
895 // pages, at best we have our pages plus whatever entry came after us
897 FreeEntry
->Size
= FreePages
;
900 // Find the appropriate list we should be on
902 i
= FreeEntry
->Size
- 1;
903 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
908 InsertTailList (&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
912 // Just a sanity check
914 ASSERT(FreePages
!= 0);
917 // Get all the pages between our allocation and its end. These will all now
918 // become free page chunks.
920 NextEntry
= StartingVa
;
921 LastEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)NextEntry
+ (FreePages
<< PAGE_SHIFT
));
925 // Link back to the parent free entry, and keep going
927 NextEntry
->Owner
= FreeEntry
;
928 NextEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)NextEntry
+ PAGE_SIZE
);
929 } while (NextEntry
!= LastEntry
);
932 // We're done, release the lock and let the caller know how much we freed
934 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
935 return NumberOfPages
;
941 MiRaisePoolQuota(IN POOL_TYPE PoolType
,
942 IN ULONG CurrentMaxQuota
,
943 OUT PULONG NewMaxQuota
)
949 *NewMaxQuota
= CurrentMaxQuota
+ 65536;
953 /* PUBLIC FUNCTIONS ***********************************************************/
960 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes
,
972 MmFreeMappingAddress(IN PVOID BaseAddress
,