2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/pool.c
5 * PURPOSE: ARM Memory Manager Pool Allocator
6 * PROGRAMMERS: ReactOS Portable Systems Group
9 /* INCLUDES *******************************************************************/
15 #line 15 "ARM³::POOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
19 /* GLOBALS ********************************************************************/
21 LIST_ENTRY MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
];
22 PFN_NUMBER MmNumberOfFreeNonPagedPool
, MiExpansionPoolPagesInitialCharge
;
23 PVOID MmNonPagedPoolEnd0
;
24 PFN_NUMBER MiStartOfInitialPoolFrame
, MiEndOfInitialPoolFrame
;
25 KGUARDED_MUTEX MmPagedPoolMutex
;
26 MM_PAGED_POOL_INFO MmPagedPoolInfo
;
27 SIZE_T MmAllocatedNonPagedPool
;
28 ULONG MmSpecialPoolTag
;
29 ULONG MmConsumedPoolPercentage
;
30 BOOLEAN MmProtectFreedNonPagedPool
;
32 /* PRIVATE FUNCTIONS **********************************************************/
36 MiProtectFreeNonPagedPool(IN PVOID VirtualAddress
,
39 PMMPTE PointerPte
, LastPte
;
42 /* If pool is physical, can't protect PTEs */
43 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress
)) return;
45 /* Get PTE pointers and loop */
46 PointerPte
= MiAddressToPte(VirtualAddress
);
47 LastPte
= PointerPte
+ PageCount
;
50 /* Capture the PTE for safety */
51 TempPte
= *PointerPte
;
53 /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
54 TempPte
.u
.Hard
.Valid
= 0;
55 TempPte
.u
.Soft
.Prototype
= 1;
56 MI_WRITE_INVALID_PTE(PointerPte
, TempPte
);
57 } while (++PointerPte
< LastPte
);
60 KeFlushEntireTb(TRUE
, TRUE
);
65 MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress
,
70 PFN_NUMBER UnprotectedPages
= 0;
72 /* If pool is physical, can't protect PTEs */
73 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress
)) return FALSE
;
75 /* Get, and capture the PTE */
76 PointerPte
= MiAddressToPte(VirtualAddress
);
77 TempPte
= *PointerPte
;
79 /* Loop protected PTEs */
80 while ((TempPte
.u
.Hard
.Valid
== 0) && (TempPte
.u
.Soft
.Prototype
== 1))
82 /* Unprotect the PTE */
83 TempPte
.u
.Hard
.Valid
= 1;
84 TempPte
.u
.Soft
.Prototype
= 0;
85 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
88 if (++UnprotectedPages
== PageCount
) break;
90 /* Capture next PTE */
91 TempPte
= *(++PointerPte
);
94 /* Return if any pages were unprotected */
95 return UnprotectedPages
? TRUE
: FALSE
;
100 MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links
,
101 OUT PVOID
* PoolFlink
,
102 OUT PVOID
* PoolBlink
)
107 /* Initialize variables */
108 *PoolFlink
= *PoolBlink
= NULL
;
110 /* Check if the list has entries */
111 if (IsListEmpty(Links
) == FALSE
)
113 /* We are going to need to forward link to do an insert */
114 PoolVa
= Links
->Flink
;
116 /* So make it safe to access */
117 Safe
= MiUnProtectFreeNonPagedPool(PoolVa
, 1);
118 if (Safe
) PoolFlink
= PoolVa
;
121 /* Are we going to need a backward link too? */
122 if (Links
!= Links
->Blink
)
124 /* Get the head's backward link for the insert */
125 PoolVa
= Links
->Blink
;
127 /* Make it safe to access */
128 Safe
= MiUnProtectFreeNonPagedPool(PoolVa
, 1);
129 if (Safe
) PoolBlink
= PoolVa
;
135 MiProtectedPoolProtectLinks(IN PVOID PoolFlink
,
138 /* Reprotect the pages, if they got unprotected earlier */
139 if (PoolFlink
) MiProtectFreeNonPagedPool(PoolFlink
, 1);
140 if (PoolBlink
) MiProtectFreeNonPagedPool(PoolBlink
, 1);
145 MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead
,
146 IN PLIST_ENTRY Entry
,
149 PVOID PoolFlink
, PoolBlink
;
151 /* Make the list accessible */
152 MiProtectedPoolUnProtectLinks(ListHead
, &PoolFlink
, &PoolBlink
);
154 /* Now insert in the right position */
155 Critical
? InsertHeadList(ListHead
, Entry
) : InsertTailList(ListHead
, Entry
);
157 /* And reprotect the pages containing the free links */
158 MiProtectedPoolProtectLinks(PoolFlink
, PoolBlink
);
163 MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry
)
165 PVOID PoolFlink
, PoolBlink
;
167 /* Make the list accessible */
168 MiProtectedPoolUnProtectLinks(Entry
, &PoolFlink
, &PoolBlink
);
171 RemoveEntryList(Entry
);
173 /* And reprotect the pages containing the free links */
174 if (PoolFlink
) MiProtectFreeNonPagedPool(PoolFlink
, 1);
175 if (PoolBlink
) MiProtectFreeNonPagedPool(PoolBlink
, 1);
181 MiInitializeNonPagedPoolThresholds(VOID
)
183 PFN_NUMBER Size
= MmMaximumNonPagedPoolInPages
;
185 /* Default low threshold of 8MB or one third of nonpaged pool */
186 MiLowNonPagedPoolThreshold
= (8 * _1MB
) >> PAGE_SHIFT
;
187 MiLowNonPagedPoolThreshold
= min(MiLowNonPagedPoolThreshold
, Size
/ 3);
189 /* Default high threshold of 20MB or 50% */
190 MiHighNonPagedPoolThreshold
= (20 * _1MB
) >> PAGE_SHIFT
;
191 MiHighNonPagedPoolThreshold
= min(MiHighNonPagedPoolThreshold
, Size
/ 2);
192 ASSERT(MiLowNonPagedPoolThreshold
< MiHighNonPagedPoolThreshold
);
198 MiInitializePoolEvents(VOID
)
201 PFN_NUMBER FreePoolInPages
;
203 /* Lock paged pool */
204 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
206 /* Total size of the paged pool minus the allocated size, is free */
207 FreePoolInPages
= MmSizeOfPagedPoolInPages
- MmPagedPoolInfo
.AllocatedPagedPool
;
209 /* Check the initial state high state */
210 if (FreePoolInPages
>= MiHighPagedPoolThreshold
)
212 /* We have plenty of pool */
213 KeSetEvent(MiHighPagedPoolEvent
, 0, FALSE
);
218 KeClearEvent(MiHighPagedPoolEvent
);
221 /* Check the initial low state */
222 if (FreePoolInPages
<= MiLowPagedPoolThreshold
)
224 /* We're very low in free pool memory */
225 KeSetEvent(MiLowPagedPoolEvent
, 0, FALSE
);
230 KeClearEvent(MiLowPagedPoolEvent
);
233 /* Release the paged pool lock */
234 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
236 /* Now it's time for the nonpaged pool lock */
237 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
239 /* Free pages are the maximum minus what's been allocated */
240 FreePoolInPages
= MmMaximumNonPagedPoolInPages
- MmAllocatedNonPagedPool
;
242 /* Check if we have plenty */
243 if (FreePoolInPages
>= MiHighNonPagedPoolThreshold
)
245 /* We do, set the event */
246 KeSetEvent(MiHighNonPagedPoolEvent
, 0, FALSE
);
250 /* We don't, clear the event */
251 KeClearEvent(MiHighNonPagedPoolEvent
);
254 /* Check if we have very little */
255 if (FreePoolInPages
<= MiLowNonPagedPoolThreshold
)
257 /* We do, set the event */
258 KeSetEvent(MiLowNonPagedPoolEvent
, 0, FALSE
);
262 /* We don't, clear it */
263 KeClearEvent(MiLowNonPagedPoolEvent
);
266 /* We're done, release the nonpaged pool lock */
267 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
273 MiInitializeNonPagedPool(VOID
)
276 PFN_NUMBER PoolPages
;
277 PMMFREE_POOL_ENTRY FreeEntry
, FirstEntry
;
282 // We keep 4 lists of free pages (4 lists help avoid contention)
284 for (i
= 0; i
< MI_MAX_FREE_PAGE_LISTS
; i
++)
287 // Initialize each of them
289 InitializeListHead(&MmNonPagedPoolFreeListHead
[i
]);
293 // Calculate how many pages the initial nonpaged pool has
295 PoolPages
= BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes
);
296 MmNumberOfFreeNonPagedPool
= PoolPages
;
299 // Initialize the first free entry
301 FreeEntry
= MmNonPagedPoolStart
;
302 FirstEntry
= FreeEntry
;
303 FreeEntry
->Size
= PoolPages
;
304 FreeEntry
->Signature
= MM_FREE_POOL_SIGNATURE
;
305 FreeEntry
->Owner
= FirstEntry
;
308 // Insert it into the last list
310 InsertHeadList(&MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
- 1],
314 // Now create free entries for every single other page
316 while (PoolPages
-- > 1)
319 // Link them all back to the original entry
321 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)FreeEntry
+ PAGE_SIZE
);
322 FreeEntry
->Owner
= FirstEntry
;
323 FreeEntry
->Signature
= MM_FREE_POOL_SIGNATURE
;
327 // Validate and remember first allocated pool page
329 PointerPte
= MiAddressToPte(MmNonPagedPoolStart
);
330 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
331 MiStartOfInitialPoolFrame
= PFN_FROM_PTE(PointerPte
);
334 // Keep track of where initial nonpaged pool ends
336 MmNonPagedPoolEnd0
= (PVOID
)((ULONG_PTR
)MmNonPagedPoolStart
+
337 MmSizeOfNonPagedPoolInBytes
);
340 // Validate and remember last allocated pool page
342 PointerPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)MmNonPagedPoolEnd0
- 1));
343 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
344 MiEndOfInitialPoolFrame
= PFN_FROM_PTE(PointerPte
);
347 // Validate the first nonpaged pool expansion page (which is a guard page)
349 PointerPte
= MiAddressToPte(MmNonPagedPoolExpansionStart
);
350 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
353 // Calculate the size of the expansion region alone
355 MiExpansionPoolPagesInitialCharge
=
356 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes
- MmSizeOfNonPagedPoolInBytes
);
359 // Remove 2 pages, since there's a guard page on top and on the bottom
361 MiExpansionPoolPagesInitialCharge
-= 2;
364 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
365 // guard page on top so make sure to skip it. The bottom guard page will be
366 // guaranteed by the fact our size is off by one.
368 MiInitializeSystemPtes(PointerPte
+ 1,
369 MiExpansionPoolPagesInitialCharge
,
370 NonPagedPoolExpansion
);
375 MiAllocatePoolPages(IN POOL_TYPE PoolType
,
376 IN SIZE_T SizeInBytes
)
378 PFN_NUMBER SizeInPages
, PageFrameNumber
, PageTableCount
;
381 PLIST_ENTRY NextEntry
, NextHead
, LastHead
;
382 PMMPTE PointerPte
, StartPte
;
388 PVOID BaseVa
, BaseVaStart
;
389 PMMFREE_POOL_ENTRY FreeEntry
;
390 PKSPIN_LOCK_QUEUE LockQueue
;
393 // Figure out how big the allocation is in pages
395 SizeInPages
= BYTES_TO_PAGES(SizeInBytes
);
400 if ((PoolType
& BASE_POOL_TYPE_MASK
) == PagedPool
)
403 // Lock the paged pool mutex
405 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
408 // Find some empty allocation space
410 i
= RtlFindClearBitsAndSet(MmPagedPoolInfo
.PagedPoolAllocationMap
,
412 MmPagedPoolInfo
.PagedPoolHint
);
416 // Get the page bit count
418 i
= ((SizeInPages
- 1) / PTE_COUNT
) + 1;
419 DPRINT1("Paged pool expansion: %d %x\n", i
, SizeInPages
);
422 // Check if there is enougn paged pool expansion space left
424 if (MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
>
425 (PMMPDE
)MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
))
430 DPRINT1("OUT OF PAGED POOL!!!\n");
431 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
436 // Check if we'll have to expand past the last PTE we have available
438 if (((i
- 1) + MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
) >
439 (PMMPDE
)MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
))
442 // We can only support this much then
444 PageTableCount
= (PMMPDE
)MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
) -
445 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
+
447 ASSERT(PageTableCount
< i
);
453 // Otherwise, there is plenty of space left for this expansion
459 // Get the template PDE we'll use to expand
461 TempPde
= ValidKernelPde
;
464 // Get the first PTE in expansion space
466 PointerPde
= MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
;
467 BaseVa
= MiPdeToAddress(PointerPde
);
468 BaseVaStart
= BaseVa
;
471 // Lock the PFN database and loop pages
473 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
477 // It should not already be valid
479 ASSERT(PointerPde
->u
.Hard
.Valid
== 0);
482 MI_SET_USAGE(MI_USAGE_PAGED_POOL
);
483 MI_SET_PROCESS2("Kernel");
484 PageFrameNumber
= MiRemoveAnyPage(MI_GET_NEXT_COLOR());
485 TempPde
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
486 #if (_MI_PAGING_LEVELS >= 3)
487 /* On PAE/x64 systems, there's no double-buffering */
491 // Save it into our double-buffered system page directory
493 MmSystemPagePtes
[((ULONG_PTR
)PointerPde
& (SYSTEM_PD_SIZE
- 1)) / sizeof(MMPTE
)] = TempPde
;
495 /* Initialize the PFN */
496 MiInitializePfnForOtherProcess(PageFrameNumber
,
498 MmSystemPageDirectory
[(PointerPde
- MiAddressToPde(NULL
)) / PDE_COUNT
]);
500 /* Write the actual PDE now */
501 MI_WRITE_VALID_PDE(PointerPde
, TempPde
);
504 // Move on to the next expansion address
507 BaseVa
= (PVOID
)((ULONG_PTR
)BaseVa
+ PAGE_SIZE
);
512 // Release the PFN database lock
514 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
517 // These pages are now available, clear their availablity bits
519 EndAllocation
= (MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
-
520 (PMMPDE
)MiAddressToPte(MmPagedPoolInfo
.FirstPteForPagedPool
)) *
522 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
,
524 PageTableCount
* PTE_COUNT
);
527 // Update the next expansion location
529 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
+= PageTableCount
;
532 // Zero out the newly available memory
534 RtlZeroMemory(BaseVaStart
, PageTableCount
* PAGE_SIZE
);
537 // Now try consuming the pages again
539 i
= RtlFindClearBitsAndSet(MmPagedPoolInfo
.PagedPoolAllocationMap
,
547 DPRINT1("OUT OF PAGED POOL!!!\n");
548 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
554 // Update the pool hint if the request was just one page
556 if (SizeInPages
== 1) MmPagedPoolInfo
.PagedPoolHint
= i
+ 1;
559 // Update the end bitmap so we know the bounds of this allocation when
560 // the time comes to free it
562 EndAllocation
= i
+ SizeInPages
- 1;
563 RtlSetBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, EndAllocation
);
566 // Now we can release the lock (it mainly protects the bitmap)
568 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
571 // Now figure out where this allocation starts
573 BaseVa
= (PVOID
)((ULONG_PTR
)MmPagedPoolStart
+ (i
<< PAGE_SHIFT
));
578 KeFlushEntireTb(TRUE
, TRUE
);
580 /* Setup a demand-zero writable PTE */
581 MI_MAKE_SOFTWARE_PTE(&TempPte
, MM_READWRITE
);
584 // Find the first and last PTE, then loop them all
586 PointerPte
= MiAddressToPte(BaseVa
);
587 StartPte
= PointerPte
+ SizeInPages
;
591 // Write the demand zero PTE and keep going
593 MI_WRITE_INVALID_PTE(PointerPte
, TempPte
);
594 } while (++PointerPte
< StartPte
);
597 // Return the allocation address to the caller
603 // Allocations of less than 4 pages go into their individual buckets
606 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
609 // Loop through all the free page lists based on the page index
611 NextHead
= &MmNonPagedPoolFreeListHead
[i
];
612 LastHead
= &MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
];
615 // Acquire the nonpaged pool lock
617 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
621 // Now loop through all the free page entries in this given list
623 NextEntry
= NextHead
->Flink
;
624 while (NextEntry
!= NextHead
)
626 /* Is freed non paged pool enabled */
627 if (MmProtectFreedNonPagedPool
)
629 /* We need to be able to touch this page, unprotect it */
630 MiUnProtectFreeNonPagedPool(NextEntry
, 0);
634 // Grab the entry and see if it can handle our allocation
636 FreeEntry
= CONTAINING_RECORD(NextEntry
, MMFREE_POOL_ENTRY
, List
);
637 ASSERT(FreeEntry
->Signature
== MM_FREE_POOL_SIGNATURE
);
638 if (FreeEntry
->Size
>= SizeInPages
)
641 // It does, so consume the pages from here
643 FreeEntry
->Size
-= SizeInPages
;
646 // The allocation will begin in this free page area
648 BaseVa
= (PVOID
)((ULONG_PTR
)FreeEntry
+
649 (FreeEntry
->Size
<< PAGE_SHIFT
));
651 /* Remove the item from the list, depending if pool is protected */
652 MmProtectFreedNonPagedPool
?
653 MiProtectedPoolRemoveEntryList(&FreeEntry
->List
) :
654 RemoveEntryList(&FreeEntry
->List
);
657 // However, check if its' still got space left
659 if (FreeEntry
->Size
!= 0)
661 /* Check which list to insert this entry into */
662 i
= FreeEntry
->Size
- 1;
663 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
665 /* Insert the entry into the free list head, check for prot. pool */
666 MmProtectFreedNonPagedPool
?
667 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
, TRUE
) :
668 InsertTailList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
670 /* Is freed non paged pool protected? */
671 if (MmProtectFreedNonPagedPool
)
673 /* Protect the freed pool! */
674 MiProtectFreeNonPagedPool(FreeEntry
, FreeEntry
->Size
);
679 // Grab the PTE for this allocation
681 PointerPte
= MiAddressToPte(BaseVa
);
682 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
685 // Grab the PFN NextEntry and index
687 Pfn1
= MiGetPfnEntry(PFN_FROM_PTE(PointerPte
));
690 // Now mark it as the beginning of an allocation
692 ASSERT(Pfn1
->u3
.e1
.StartOfAllocation
== 0);
693 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
695 /* Mark it as special pool if needed */
696 ASSERT(Pfn1
->u4
.VerifierAllocation
== 0);
697 if (PoolType
& 64) Pfn1
->u4
.VerifierAllocation
= 1;
700 // Check if the allocation is larger than one page
702 if (SizeInPages
!= 1)
705 // Navigate to the last PFN entry and PTE
707 PointerPte
+= SizeInPages
- 1;
708 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
709 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
713 // Mark this PFN as the last (might be the same as the first)
715 ASSERT(Pfn1
->u3
.e1
.EndOfAllocation
== 0);
716 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
719 // Release the nonpaged pool lock, and return the allocation
721 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
726 // Try the next free page entry
728 NextEntry
= FreeEntry
->List
.Flink
;
730 /* Is freed non paged pool protected? */
731 if (MmProtectFreedNonPagedPool
)
733 /* Protect the freed pool! */
734 MiProtectFreeNonPagedPool(FreeEntry
, FreeEntry
->Size
);
737 } while (++NextHead
< LastHead
);
740 // If we got here, we're out of space.
741 // Start by releasing the lock
743 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
746 // Allocate some system PTEs
748 StartPte
= MiReserveSystemPtes(SizeInPages
, NonPagedPoolExpansion
);
749 PointerPte
= StartPte
;
750 if (StartPte
== NULL
)
755 DPRINT1("Out of NP Expansion Pool\n");
760 // Acquire the pool lock now
762 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
765 // Lock the PFN database too
767 LockQueue
= &KeGetCurrentPrcb()->LockQueue
[LockQueuePfnLock
];
768 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue
);
773 TempPte
= ValidKernelPte
;
776 /* Allocate a page */
777 MI_SET_USAGE(MI_USAGE_PAGED_POOL
);
778 MI_SET_PROCESS2("Kernel");
779 PageFrameNumber
= MiRemoveAnyPage(MI_GET_NEXT_COLOR());
781 /* Get the PFN entry for it and fill it out */
782 Pfn1
= MiGetPfnEntry(PageFrameNumber
);
783 Pfn1
->u3
.e2
.ReferenceCount
= 1;
784 Pfn1
->u2
.ShareCount
= 1;
785 Pfn1
->PteAddress
= PointerPte
;
786 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
787 Pfn1
->u4
.VerifierAllocation
= 0;
789 /* Write the PTE for it */
790 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
791 MI_WRITE_VALID_PTE(PointerPte
++, TempPte
);
792 } while (--SizeInPages
> 0);
795 // This is the last page
797 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
800 // Get the first page and mark it as such
802 Pfn1
= MiGetPfnEntry(StartPte
->u
.Hard
.PageFrameNumber
);
803 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
805 /* Mark it as a verifier allocation if needed */
806 ASSERT(Pfn1
->u4
.VerifierAllocation
== 0);
807 if (PoolType
& 64) Pfn1
->u4
.VerifierAllocation
= 1;
810 // Release the PFN and nonpaged pool lock
812 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue
);
813 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
816 // Return the address
818 return MiPteToAddress(StartPte
);
823 MiFreePoolPages(IN PVOID StartingVa
)
825 PMMPTE PointerPte
, StartPte
;
826 PMMPFN Pfn1
, StartPfn
;
827 PFN_NUMBER FreePages
, NumberOfPages
;
829 PMMFREE_POOL_ENTRY FreeEntry
, NextEntry
, LastEntry
;
835 if ((StartingVa
>= MmPagedPoolStart
) && (StartingVa
<= MmPagedPoolEnd
))
838 // Calculate the offset from the beginning of paged pool, and convert it
841 i
= ((ULONG_PTR
)StartingVa
- (ULONG_PTR
)MmPagedPoolStart
) >> PAGE_SHIFT
;
845 // Now use the end bitmap to scan until we find a set bit, meaning that
846 // this allocation finishes here
848 while (!RtlTestBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, End
)) End
++;
851 // Now calculate the total number of pages this allocation spans
853 NumberOfPages
= End
- i
+ 1;
855 /* Delete the actual pages */
856 PointerPte
= MmPagedPoolInfo
.FirstPteForPagedPool
+ i
;
857 FreePages
= MiDeleteSystemPageableVm(PointerPte
, NumberOfPages
, 0, NULL
);
858 ASSERT(FreePages
== NumberOfPages
);
861 // Acquire the paged pool lock
863 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
866 // Clear the allocation and free bits
868 RtlClearBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, i
);
869 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, i
, NumberOfPages
);
872 // Update the hint if we need to
874 if (i
< MmPagedPoolInfo
.PagedPoolHint
) MmPagedPoolInfo
.PagedPoolHint
= i
;
877 // Release the lock protecting the bitmaps
879 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
882 // And finally return the number of pages freed
884 return NumberOfPages
;
888 // Get the first PTE and its corresponding PFN entry
890 StartPte
= PointerPte
= MiAddressToPte(StartingVa
);
891 StartPfn
= Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
894 // Loop until we find the last PTE
896 while (Pfn1
->u3
.e1
.EndOfAllocation
== 0)
902 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
906 // Now we know how many pages we have
908 NumberOfPages
= PointerPte
- StartPte
+ 1;
911 // Acquire the nonpaged pool lock
913 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
916 // Mark the first and last PTEs as not part of an allocation anymore
918 StartPfn
->u3
.e1
.StartOfAllocation
= 0;
919 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
922 // Assume we will free as many pages as the allocation was
924 FreePages
= NumberOfPages
;
927 // Peek one page past the end of the allocation
932 // Guard against going past initial nonpaged pool
934 if (MiGetPfnEntryIndex(Pfn1
) == MiEndOfInitialPoolFrame
)
937 // This page is on the outskirts of initial nonpaged pool, so ignore it
944 ASSERT((ULONG_PTR
)StartingVa
+ NumberOfPages
<= (ULONG_PTR
)MmNonPagedPoolEnd
);
946 /* Check if protected pool is enabled */
947 if (MmProtectFreedNonPagedPool
)
949 /* The freed block will be merged, it must be made accessible */
950 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte
), 0);
954 // Otherwise, our entire allocation must've fit within the initial non
955 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
956 // the next allocation
958 if (PointerPte
->u
.Hard
.Valid
== 1)
961 // It's either expansion or initial: get the PFN entry
963 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
968 // This means we've reached the guard page that protects the end of
969 // the expansion nonpaged pool
977 // Check if this allocation actually exists
979 if ((Pfn1
) && (Pfn1
->u3
.e1
.StartOfAllocation
== 0))
982 // It doesn't, so we should actually locate a free entry descriptor
984 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)StartingVa
+
985 (NumberOfPages
<< PAGE_SHIFT
));
986 ASSERT(FreeEntry
->Signature
== MM_FREE_POOL_SIGNATURE
);
987 ASSERT(FreeEntry
->Owner
== FreeEntry
);
989 /* Consume this entry's pages */
990 FreePages
+= FreeEntry
->Size
;
992 /* Remove the item from the list, depending if pool is protected */
993 MmProtectFreedNonPagedPool
?
994 MiProtectedPoolRemoveEntryList(&FreeEntry
->List
) :
995 RemoveEntryList(&FreeEntry
->List
);
999 // Now get the official free entry we'll create for the caller's allocation
1001 FreeEntry
= StartingVa
;
1004 // Check if the our allocation is the very first page
1006 if (MiGetPfnEntryIndex(StartPfn
) == MiStartOfInitialPoolFrame
)
1009 // Then we can't do anything or we'll risk underflowing
1016 // Otherwise, get the PTE for the page right before our allocation
1018 PointerPte
-= NumberOfPages
+ 1;
1020 /* Check if protected pool is enabled */
1021 if (MmProtectFreedNonPagedPool
)
1023 /* The freed block will be merged, it must be made accessible */
1024 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte
), 0);
1027 /* Check if this is valid pool, or a guard page */
1028 if (PointerPte
->u
.Hard
.Valid
== 1)
1031 // It's either expansion or initial nonpaged pool, get the PFN entry
1033 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
1038 // We must've reached the guard page, so don't risk touching it
1045 // Check if there is a valid PFN entry for the page before the allocation
1046 // and then check if this page was actually the end of an allocation.
1047 // If it wasn't, then we know for sure it's a free page
1049 if ((Pfn1
) && (Pfn1
->u3
.e1
.EndOfAllocation
== 0))
1052 // Get the free entry descriptor for that given page range
1054 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)StartingVa
- PAGE_SIZE
);
1055 ASSERT(FreeEntry
->Signature
== MM_FREE_POOL_SIGNATURE
);
1056 FreeEntry
= FreeEntry
->Owner
;
1058 /* Check if protected pool is enabled */
1059 if (MmProtectFreedNonPagedPool
)
1061 /* The freed block will be merged, it must be made accessible */
1062 MiUnProtectFreeNonPagedPool(FreeEntry
, 0);
1066 // Check if the entry is small enough to be indexed on a free list
1067 // If it is, we'll want to re-insert it, since we're about to
1068 // collapse our pages on top of it, which will change its count
1070 if (FreeEntry
->Size
< (MI_MAX_FREE_PAGE_LISTS
- 1))
1072 /* Remove the item from the list, depending if pool is protected */
1073 MmProtectFreedNonPagedPool
?
1074 MiProtectedPoolRemoveEntryList(&FreeEntry
->List
) :
1075 RemoveEntryList(&FreeEntry
->List
);
1080 FreeEntry
->Size
+= FreePages
;
1083 // And now find the new appropriate list to place it in
1085 i
= (ULONG
)(FreeEntry
->Size
- 1);
1086 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
1088 /* Insert the entry into the free list head, check for prot. pool */
1089 MmProtectFreedNonPagedPool
?
1090 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
, TRUE
) :
1091 InsertTailList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
1096 // Otherwise, just combine our free pages into this entry
1098 FreeEntry
->Size
+= FreePages
;
1103 // Check if we were unable to do any compaction, and we'll stick with this
1105 if (FreeEntry
== StartingVa
)
1108 // Well, now we are a free entry. At worse we just have our newly freed
1109 // pages, at best we have our pages plus whatever entry came after us
1111 FreeEntry
->Size
= FreePages
;
1114 // Find the appropriate list we should be on
1116 i
= FreeEntry
->Size
- 1;
1117 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
1119 /* Insert the entry into the free list head, check for prot. pool */
1120 MmProtectFreedNonPagedPool
?
1121 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
, TRUE
) :
1122 InsertTailList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
1126 // Just a sanity check
1128 ASSERT(FreePages
!= 0);
1131 // Get all the pages between our allocation and its end. These will all now
1132 // become free page chunks.
1134 NextEntry
= StartingVa
;
1135 LastEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)NextEntry
+ (FreePages
<< PAGE_SHIFT
));
1139 // Link back to the parent free entry, and keep going
1141 NextEntry
->Owner
= FreeEntry
;
1142 NextEntry
->Signature
= MM_FREE_POOL_SIGNATURE
;
1143 NextEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)NextEntry
+ PAGE_SIZE
);
1144 } while (NextEntry
!= LastEntry
);
1146 /* Is freed non paged pool protected? */
1147 if (MmProtectFreedNonPagedPool
)
1149 /* Protect the freed pool! */
1150 MiProtectFreeNonPagedPool(FreeEntry
, FreeEntry
->Size
);
1154 // We're done, release the lock and let the caller know how much we freed
1156 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
1157 return NumberOfPages
;
1163 MiRaisePoolQuota(IN POOL_TYPE PoolType
,
1164 IN ULONG CurrentMaxQuota
,
1165 OUT PULONG NewMaxQuota
)
1171 *NewMaxQuota
= CurrentMaxQuota
+ 65536;
1175 /* PUBLIC FUNCTIONS ***********************************************************/
1182 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes
,
1194 MmFreeMappingAddress(IN PVOID BaseAddress
,