/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>
18 /* GLOBALS ********************************************************************/
20 LIST_ENTRY MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
];
21 PFN_COUNT MmNumberOfFreeNonPagedPool
, MiExpansionPoolPagesInitialCharge
;
22 PVOID MmNonPagedPoolEnd0
;
23 PFN_NUMBER MiStartOfInitialPoolFrame
, MiEndOfInitialPoolFrame
;
24 KGUARDED_MUTEX MmPagedPoolMutex
;
25 MM_PAGED_POOL_INFO MmPagedPoolInfo
;
26 SIZE_T MmAllocatedNonPagedPool
;
27 ULONG MmSpecialPoolTag
;
28 ULONG MmConsumedPoolPercentage
;
29 BOOLEAN MmProtectFreedNonPagedPool
;
30 SLIST_HEADER MiNonPagedPoolSListHead
;
31 ULONG MiNonPagedPoolSListMaximum
= 4;
32 SLIST_HEADER MiPagedPoolSListHead
;
33 ULONG MiPagedPoolSListMaximum
= 8;
35 /* PRIVATE FUNCTIONS **********************************************************/
39 MiProtectFreeNonPagedPool(IN PVOID VirtualAddress
,
42 PMMPTE PointerPte
, LastPte
;
45 /* If pool is physical, can't protect PTEs */
46 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress
)) return;
48 /* Get PTE pointers and loop */
49 PointerPte
= MiAddressToPte(VirtualAddress
);
50 LastPte
= PointerPte
+ PageCount
;
53 /* Capture the PTE for safety */
54 TempPte
= *PointerPte
;
56 /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
57 TempPte
.u
.Hard
.Valid
= 0;
58 TempPte
.u
.Soft
.Prototype
= 1;
59 MI_WRITE_INVALID_PTE(PointerPte
, TempPte
);
60 } while (++PointerPte
< LastPte
);
63 KeFlushEntireTb(TRUE
, TRUE
);
68 MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress
,
73 PFN_NUMBER UnprotectedPages
= 0;
75 /* If pool is physical, can't protect PTEs */
76 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress
)) return FALSE
;
78 /* Get, and capture the PTE */
79 PointerPte
= MiAddressToPte(VirtualAddress
);
80 TempPte
= *PointerPte
;
82 /* Loop protected PTEs */
83 while ((TempPte
.u
.Hard
.Valid
== 0) && (TempPte
.u
.Soft
.Prototype
== 1))
85 /* Unprotect the PTE */
86 TempPte
.u
.Hard
.Valid
= 1;
87 TempPte
.u
.Soft
.Prototype
= 0;
88 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
91 if (++UnprotectedPages
== PageCount
) break;
93 /* Capture next PTE */
94 TempPte
= *(++PointerPte
);
97 /* Return if any pages were unprotected */
98 return UnprotectedPages
? TRUE
: FALSE
;
103 MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links
,
104 OUT PVOID
* PoolFlink
,
105 OUT PVOID
* PoolBlink
)
110 /* Initialize variables */
111 *PoolFlink
= *PoolBlink
= NULL
;
113 /* Check if the list has entries */
114 if (IsListEmpty(Links
) == FALSE
)
116 /* We are going to need to forward link to do an insert */
117 PoolVa
= Links
->Flink
;
119 /* So make it safe to access */
120 Safe
= MiUnProtectFreeNonPagedPool(PoolVa
, 1);
121 if (Safe
) *PoolFlink
= PoolVa
;
124 /* Are we going to need a backward link too? */
125 if (Links
!= Links
->Blink
)
127 /* Get the head's backward link for the insert */
128 PoolVa
= Links
->Blink
;
130 /* Make it safe to access */
131 Safe
= MiUnProtectFreeNonPagedPool(PoolVa
, 1);
132 if (Safe
) *PoolBlink
= PoolVa
;
138 MiProtectedPoolProtectLinks(IN PVOID PoolFlink
,
141 /* Reprotect the pages, if they got unprotected earlier */
142 if (PoolFlink
) MiProtectFreeNonPagedPool(PoolFlink
, 1);
143 if (PoolBlink
) MiProtectFreeNonPagedPool(PoolBlink
, 1);
148 MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead
,
149 IN PLIST_ENTRY Entry
,
152 PVOID PoolFlink
, PoolBlink
;
154 /* Make the list accessible */
155 MiProtectedPoolUnProtectLinks(ListHead
, &PoolFlink
, &PoolBlink
);
157 /* Now insert in the right position */
158 Critical
? InsertHeadList(ListHead
, Entry
) : InsertTailList(ListHead
, Entry
);
160 /* And reprotect the pages containing the free links */
161 MiProtectedPoolProtectLinks(PoolFlink
, PoolBlink
);
166 MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry
)
168 PVOID PoolFlink
, PoolBlink
;
170 /* Make the list accessible */
171 MiProtectedPoolUnProtectLinks(Entry
, &PoolFlink
, &PoolBlink
);
174 RemoveEntryList(Entry
);
176 /* And reprotect the pages containing the free links */
177 if (PoolFlink
) MiProtectFreeNonPagedPool(PoolFlink
, 1);
178 if (PoolBlink
) MiProtectFreeNonPagedPool(PoolBlink
, 1);
184 MiInitializeNonPagedPoolThresholds(VOID
)
186 PFN_NUMBER Size
= MmMaximumNonPagedPoolInPages
;
188 /* Default low threshold of 8MB or one third of nonpaged pool */
189 MiLowNonPagedPoolThreshold
= (8 * _1MB
) >> PAGE_SHIFT
;
190 MiLowNonPagedPoolThreshold
= min(MiLowNonPagedPoolThreshold
, Size
/ 3);
192 /* Default high threshold of 20MB or 50% */
193 MiHighNonPagedPoolThreshold
= (20 * _1MB
) >> PAGE_SHIFT
;
194 MiHighNonPagedPoolThreshold
= min(MiHighNonPagedPoolThreshold
, Size
/ 2);
195 ASSERT(MiLowNonPagedPoolThreshold
< MiHighNonPagedPoolThreshold
);
201 MiInitializePoolEvents(VOID
)
204 PFN_NUMBER FreePoolInPages
;
206 /* Lock paged pool */
207 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
209 /* Total size of the paged pool minus the allocated size, is free */
210 FreePoolInPages
= MmSizeOfPagedPoolInPages
- MmPagedPoolInfo
.AllocatedPagedPool
;
212 /* Check the initial state high state */
213 if (FreePoolInPages
>= MiHighPagedPoolThreshold
)
215 /* We have plenty of pool */
216 KeSetEvent(MiHighPagedPoolEvent
, 0, FALSE
);
221 KeClearEvent(MiHighPagedPoolEvent
);
224 /* Check the initial low state */
225 if (FreePoolInPages
<= MiLowPagedPoolThreshold
)
227 /* We're very low in free pool memory */
228 KeSetEvent(MiLowPagedPoolEvent
, 0, FALSE
);
233 KeClearEvent(MiLowPagedPoolEvent
);
236 /* Release the paged pool lock */
237 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
239 /* Now it's time for the nonpaged pool lock */
240 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
242 /* Free pages are the maximum minus what's been allocated */
243 FreePoolInPages
= MmMaximumNonPagedPoolInPages
- MmAllocatedNonPagedPool
;
245 /* Check if we have plenty */
246 if (FreePoolInPages
>= MiHighNonPagedPoolThreshold
)
248 /* We do, set the event */
249 KeSetEvent(MiHighNonPagedPoolEvent
, 0, FALSE
);
253 /* We don't, clear the event */
254 KeClearEvent(MiHighNonPagedPoolEvent
);
257 /* Check if we have very little */
258 if (FreePoolInPages
<= MiLowNonPagedPoolThreshold
)
260 /* We do, set the event */
261 KeSetEvent(MiLowNonPagedPoolEvent
, 0, FALSE
);
265 /* We don't, clear it */
266 KeClearEvent(MiLowNonPagedPoolEvent
);
269 /* We're done, release the nonpaged pool lock */
270 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
276 MiInitializeNonPagedPool(VOID
)
280 PMMFREE_POOL_ENTRY FreeEntry
, FirstEntry
;
285 // Initialize the pool S-LISTs as well as their maximum count. In general,
286 // we'll allow 8 times the default on a 2GB system, and two times the default
289 InitializeSListHead(&MiPagedPoolSListHead
);
290 InitializeSListHead(&MiNonPagedPoolSListHead
);
291 if (MmNumberOfPhysicalPages
>= ((2 * _1GB
) /PAGE_SIZE
))
293 MiNonPagedPoolSListMaximum
*= 8;
294 MiPagedPoolSListMaximum
*= 8;
296 else if (MmNumberOfPhysicalPages
>= (_1GB
/PAGE_SIZE
))
298 MiNonPagedPoolSListMaximum
*= 2;
299 MiPagedPoolSListMaximum
*= 2;
303 // However if debugging options for the pool are enabled, turn off the S-LIST
304 // to reduce the risk of messing things up even more
306 if (MmProtectFreedNonPagedPool
)
308 MiNonPagedPoolSListMaximum
= 0;
309 MiPagedPoolSListMaximum
= 0;
313 // We keep 4 lists of free pages (4 lists help avoid contention)
315 for (i
= 0; i
< MI_MAX_FREE_PAGE_LISTS
; i
++)
318 // Initialize each of them
320 InitializeListHead(&MmNonPagedPoolFreeListHead
[i
]);
324 // Calculate how many pages the initial nonpaged pool has
326 PoolPages
= (PFN_COUNT
)BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes
);
327 MmNumberOfFreeNonPagedPool
= PoolPages
;
330 // Initialize the first free entry
332 FreeEntry
= MmNonPagedPoolStart
;
333 FirstEntry
= FreeEntry
;
334 FreeEntry
->Size
= PoolPages
;
335 FreeEntry
->Signature
= MM_FREE_POOL_SIGNATURE
;
336 FreeEntry
->Owner
= FirstEntry
;
339 // Insert it into the last list
341 InsertHeadList(&MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
- 1],
345 // Now create free entries for every single other page
347 while (PoolPages
-- > 1)
350 // Link them all back to the original entry
352 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)FreeEntry
+ PAGE_SIZE
);
353 FreeEntry
->Owner
= FirstEntry
;
354 FreeEntry
->Signature
= MM_FREE_POOL_SIGNATURE
;
358 // Validate and remember first allocated pool page
360 PointerPte
= MiAddressToPte(MmNonPagedPoolStart
);
361 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
362 MiStartOfInitialPoolFrame
= PFN_FROM_PTE(PointerPte
);
365 // Keep track of where initial nonpaged pool ends
367 MmNonPagedPoolEnd0
= (PVOID
)((ULONG_PTR
)MmNonPagedPoolStart
+
368 MmSizeOfNonPagedPoolInBytes
);
371 // Validate and remember last allocated pool page
373 PointerPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)MmNonPagedPoolEnd0
- 1));
374 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
375 MiEndOfInitialPoolFrame
= PFN_FROM_PTE(PointerPte
);
378 // Validate the first nonpaged pool expansion page (which is a guard page)
380 PointerPte
= MiAddressToPte(MmNonPagedPoolExpansionStart
);
381 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
384 // Calculate the size of the expansion region alone
386 MiExpansionPoolPagesInitialCharge
= (PFN_COUNT
)
387 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes
- MmSizeOfNonPagedPoolInBytes
);
390 // Remove 2 pages, since there's a guard page on top and on the bottom
392 MiExpansionPoolPagesInitialCharge
-= 2;
395 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
396 // guard page on top so make sure to skip it. The bottom guard page will be
397 // guaranteed by the fact our size is off by one.
399 MiInitializeSystemPtes(PointerPte
+ 1,
400 MiExpansionPoolPagesInitialCharge
,
401 NonPagedPoolExpansion
);
406 MmDeterminePoolType(IN PVOID PoolAddress
)
409 // Use a simple bounds check
411 if (PoolAddress
>= MmPagedPoolStart
&& PoolAddress
<= MmPagedPoolEnd
)
413 else if (PoolAddress
>= MmNonPagedPoolStart
&& PoolAddress
<= MmNonPagedPoolEnd
)
415 KeBugCheckEx(BAD_POOL_CALLER
, 0x42, (ULONG_PTR
)PoolAddress
, 0, 0);
420 MiAllocatePoolPages(IN POOL_TYPE PoolType
,
421 IN SIZE_T SizeInBytes
)
423 PFN_NUMBER PageFrameNumber
;
424 PFN_COUNT SizeInPages
, PageTableCount
;
427 PLIST_ENTRY NextEntry
, NextHead
, LastHead
;
428 PMMPTE PointerPte
, StartPte
;
434 PVOID BaseVa
, BaseVaStart
;
435 PMMFREE_POOL_ENTRY FreeEntry
;
438 // Figure out how big the allocation is in pages
440 SizeInPages
= (PFN_COUNT
)BYTES_TO_PAGES(SizeInBytes
);
443 // Check for overflow
445 if (SizeInPages
== 0)
456 if ((PoolType
& BASE_POOL_TYPE_MASK
) == PagedPool
)
459 // If only one page is being requested, try to grab it from the S-LIST
461 if ((SizeInPages
== 1) && (ExQueryDepthSList(&MiPagedPoolSListHead
)))
463 BaseVa
= InterlockedPopEntrySList(&MiPagedPoolSListHead
);
464 if (BaseVa
) return BaseVa
;
468 // Lock the paged pool mutex
470 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
473 // Find some empty allocation space
475 i
= RtlFindClearBitsAndSet(MmPagedPoolInfo
.PagedPoolAllocationMap
,
477 MmPagedPoolInfo
.PagedPoolHint
);
481 // Get the page bit count
483 i
= ((SizeInPages
- 1) / PTE_COUNT
) + 1;
484 DPRINT("Paged pool expansion: %lu %x\n", i
, SizeInPages
);
487 // Check if there is enougn paged pool expansion space left
489 if (MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
>
490 (PMMPDE
)MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
))
495 DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes
);
496 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
501 // Check if we'll have to expand past the last PTE we have available
503 if (((i
- 1) + MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
) >
504 (PMMPDE
)MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
))
507 // We can only support this much then
509 PointerPde
= MiPteToPde(MmPagedPoolInfo
.LastPteForPagedPool
);
510 PageTableCount
= (PFN_COUNT
)(PointerPde
+ 1 -
511 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
);
512 ASSERT(PageTableCount
< i
);
518 // Otherwise, there is plenty of space left for this expansion
524 // Get the template PDE we'll use to expand
526 TempPde
= ValidKernelPde
;
529 // Get the first PTE in expansion space
531 PointerPde
= MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
;
532 BaseVa
= MiPdeToPte(PointerPde
);
533 BaseVaStart
= BaseVa
;
536 // Lock the PFN database and loop pages
538 OldIrql
= MiAcquirePfnLock();
542 // It should not already be valid
544 ASSERT(PointerPde
->u
.Hard
.Valid
== 0);
547 MI_SET_USAGE(MI_USAGE_PAGED_POOL
);
548 MI_SET_PROCESS2("Kernel");
549 PageFrameNumber
= MiRemoveAnyPage(MI_GET_NEXT_COLOR());
550 TempPde
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
551 #if (_MI_PAGING_LEVELS >= 3)
552 /* On PAE/x64 systems, there's no double-buffering */
553 /* Initialize the PFN entry for it */
554 MiInitializePfnForOtherProcess(PageFrameNumber
,
556 PFN_FROM_PTE(MiAddressToPte(PointerPde
)));
558 /* Write the actual PDE now */
559 MI_WRITE_VALID_PDE(PointerPde
, TempPde
);
562 // Save it into our double-buffered system page directory
564 MmSystemPagePtes
[((ULONG_PTR
)PointerPde
& (SYSTEM_PD_SIZE
- 1)) / sizeof(MMPTE
)] = TempPde
;
566 /* Initialize the PFN */
567 MiInitializePfnForOtherProcess(PageFrameNumber
,
569 MmSystemPageDirectory
[(PointerPde
- MiAddressToPde(NULL
)) / PDE_COUNT
]);
573 // Move on to the next expansion address
576 BaseVa
= (PVOID
)((ULONG_PTR
)BaseVa
+ PAGE_SIZE
);
581 // Release the PFN database lock
583 MiReleasePfnLock(OldIrql
);
586 // These pages are now available, clear their availablity bits
588 EndAllocation
= (ULONG
)(MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
-
589 (PMMPDE
)MiAddressToPte(MmPagedPoolInfo
.FirstPteForPagedPool
)) *
591 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
,
593 PageTableCount
* PTE_COUNT
);
596 // Update the next expansion location
598 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
+= PageTableCount
;
601 // Zero out the newly available memory
603 RtlZeroMemory(BaseVaStart
, PageTableCount
* PAGE_SIZE
);
606 // Now try consuming the pages again
608 i
= RtlFindClearBitsAndSet(MmPagedPoolInfo
.PagedPoolAllocationMap
,
616 DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes
);
617 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
623 // Update the pool hint if the request was just one page
625 if (SizeInPages
== 1) MmPagedPoolInfo
.PagedPoolHint
= i
+ 1;
628 // Update the end bitmap so we know the bounds of this allocation when
629 // the time comes to free it
631 EndAllocation
= i
+ SizeInPages
- 1;
632 RtlSetBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, EndAllocation
);
635 // Now we can release the lock (it mainly protects the bitmap)
637 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
640 // Now figure out where this allocation starts
642 BaseVa
= (PVOID
)((ULONG_PTR
)MmPagedPoolStart
+ (i
<< PAGE_SHIFT
));
647 KeFlushEntireTb(TRUE
, TRUE
);
649 /* Setup a demand-zero writable PTE */
650 MI_MAKE_SOFTWARE_PTE(&TempPte
, MM_READWRITE
);
653 // Find the first and last PTE, then loop them all
655 PointerPte
= MiAddressToPte(BaseVa
);
656 StartPte
= PointerPte
+ SizeInPages
;
660 // Write the demand zero PTE and keep going
662 MI_WRITE_INVALID_PTE(PointerPte
, TempPte
);
663 } while (++PointerPte
< StartPte
);
666 // Return the allocation address to the caller
672 // If only one page is being requested, try to grab it from the S-LIST
674 if ((SizeInPages
== 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead
)))
676 BaseVa
= InterlockedPopEntrySList(&MiNonPagedPoolSListHead
);
677 if (BaseVa
) return BaseVa
;
681 // Allocations of less than 4 pages go into their individual buckets
684 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
687 // Loop through all the free page lists based on the page index
689 NextHead
= &MmNonPagedPoolFreeListHead
[i
];
690 LastHead
= &MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
];
693 // Acquire the nonpaged pool lock
695 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
699 // Now loop through all the free page entries in this given list
701 NextEntry
= NextHead
->Flink
;
702 while (NextEntry
!= NextHead
)
704 /* Is freed non paged pool enabled */
705 if (MmProtectFreedNonPagedPool
)
707 /* We need to be able to touch this page, unprotect it */
708 MiUnProtectFreeNonPagedPool(NextEntry
, 0);
712 // Grab the entry and see if it can handle our allocation
714 FreeEntry
= CONTAINING_RECORD(NextEntry
, MMFREE_POOL_ENTRY
, List
);
715 ASSERT(FreeEntry
->Signature
== MM_FREE_POOL_SIGNATURE
);
716 if (FreeEntry
->Size
>= SizeInPages
)
719 // It does, so consume the pages from here
721 FreeEntry
->Size
-= SizeInPages
;
724 // The allocation will begin in this free page area
726 BaseVa
= (PVOID
)((ULONG_PTR
)FreeEntry
+
727 (FreeEntry
->Size
<< PAGE_SHIFT
));
729 /* Remove the item from the list, depending if pool is protected */
730 if (MmProtectFreedNonPagedPool
)
731 MiProtectedPoolRemoveEntryList(&FreeEntry
->List
);
733 RemoveEntryList(&FreeEntry
->List
);
736 // However, check if its' still got space left
738 if (FreeEntry
->Size
!= 0)
740 /* Check which list to insert this entry into */
741 i
= FreeEntry
->Size
- 1;
742 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
744 /* Insert the entry into the free list head, check for prot. pool */
745 if (MmProtectFreedNonPagedPool
)
746 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
, TRUE
);
748 InsertTailList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
750 /* Is freed non paged pool protected? */
751 if (MmProtectFreedNonPagedPool
)
753 /* Protect the freed pool! */
754 MiProtectFreeNonPagedPool(FreeEntry
, FreeEntry
->Size
);
759 // Grab the PTE for this allocation
761 PointerPte
= MiAddressToPte(BaseVa
);
762 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
765 // Grab the PFN NextEntry and index
767 Pfn1
= MiGetPfnEntry(PFN_FROM_PTE(PointerPte
));
770 // Now mark it as the beginning of an allocation
772 ASSERT(Pfn1
->u3
.e1
.StartOfAllocation
== 0);
773 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
775 /* Mark it as special pool if needed */
776 ASSERT(Pfn1
->u4
.VerifierAllocation
== 0);
777 if (PoolType
& VERIFIER_POOL_MASK
)
779 Pfn1
->u4
.VerifierAllocation
= 1;
783 // Check if the allocation is larger than one page
785 if (SizeInPages
!= 1)
788 // Navigate to the last PFN entry and PTE
790 PointerPte
+= SizeInPages
- 1;
791 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
792 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
796 // Mark this PFN as the last (might be the same as the first)
798 ASSERT(Pfn1
->u3
.e1
.EndOfAllocation
== 0);
799 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
802 // Release the nonpaged pool lock, and return the allocation
804 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
809 // Try the next free page entry
811 NextEntry
= FreeEntry
->List
.Flink
;
813 /* Is freed non paged pool protected? */
814 if (MmProtectFreedNonPagedPool
)
816 /* Protect the freed pool! */
817 MiProtectFreeNonPagedPool(FreeEntry
, FreeEntry
->Size
);
820 } while (++NextHead
< LastHead
);
823 // If we got here, we're out of space.
824 // Start by releasing the lock
826 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
829 // Allocate some system PTEs
831 StartPte
= MiReserveSystemPtes(SizeInPages
, NonPagedPoolExpansion
);
832 PointerPte
= StartPte
;
833 if (StartPte
== NULL
)
838 DPRINT1("Out of NP Expansion Pool\n");
843 // Acquire the pool lock now
845 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
848 // Lock the PFN database too
850 MiAcquirePfnLockAtDpcLevel();
855 TempPte
= ValidKernelPte
;
858 /* Allocate a page */
859 MI_SET_USAGE(MI_USAGE_PAGED_POOL
);
860 MI_SET_PROCESS2("Kernel");
861 PageFrameNumber
= MiRemoveAnyPage(MI_GET_NEXT_COLOR());
863 /* Get the PFN entry for it and fill it out */
864 Pfn1
= MiGetPfnEntry(PageFrameNumber
);
865 Pfn1
->u3
.e2
.ReferenceCount
= 1;
866 Pfn1
->u2
.ShareCount
= 1;
867 Pfn1
->PteAddress
= PointerPte
;
868 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
869 Pfn1
->u4
.VerifierAllocation
= 0;
871 /* Write the PTE for it */
872 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
873 MI_WRITE_VALID_PTE(PointerPte
++, TempPte
);
874 } while (--SizeInPages
> 0);
877 // This is the last page
879 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
882 // Get the first page and mark it as such
884 Pfn1
= MiGetPfnEntry(StartPte
->u
.Hard
.PageFrameNumber
);
885 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
887 /* Mark it as a verifier allocation if needed */
888 ASSERT(Pfn1
->u4
.VerifierAllocation
== 0);
889 if (PoolType
& VERIFIER_POOL_MASK
) Pfn1
->u4
.VerifierAllocation
= 1;
892 // Release the PFN and nonpaged pool lock
894 MiReleasePfnLockFromDpcLevel();
895 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
898 // Return the address
900 return MiPteToAddress(StartPte
);
905 MiFreePoolPages(IN PVOID StartingVa
)
907 PMMPTE PointerPte
, StartPte
;
908 PMMPFN Pfn1
, StartPfn
;
909 PFN_COUNT FreePages
, NumberOfPages
;
911 PMMFREE_POOL_ENTRY FreeEntry
, NextEntry
, LastEntry
;
918 if ((StartingVa
>= MmPagedPoolStart
) && (StartingVa
<= MmPagedPoolEnd
))
921 // Calculate the offset from the beginning of paged pool, and convert it
924 Offset
= (ULONG_PTR
)StartingVa
- (ULONG_PTR
)MmPagedPoolStart
;
925 i
= (ULONG
)(Offset
>> PAGE_SHIFT
);
929 // Now use the end bitmap to scan until we find a set bit, meaning that
930 // this allocation finishes here
932 while (!RtlTestBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, End
)) End
++;
935 // Now calculate the total number of pages this allocation spans. If it's
936 // only one page, add it to the S-LIST instead of freeing it
938 NumberOfPages
= End
- i
+ 1;
939 if ((NumberOfPages
== 1) &&
940 (ExQueryDepthSList(&MiPagedPoolSListHead
) < MiPagedPoolSListMaximum
))
942 InterlockedPushEntrySList(&MiPagedPoolSListHead
, StartingVa
);
946 /* Delete the actual pages */
947 PointerPte
= MmPagedPoolInfo
.FirstPteForPagedPool
+ i
;
948 FreePages
= MiDeleteSystemPageableVm(PointerPte
, NumberOfPages
, 0, NULL
);
949 ASSERT(FreePages
== NumberOfPages
);
952 // Acquire the paged pool lock
954 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
957 // Clear the allocation and free bits
959 RtlClearBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, End
);
960 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, i
, NumberOfPages
);
963 // Update the hint if we need to
965 if (i
< MmPagedPoolInfo
.PagedPoolHint
) MmPagedPoolInfo
.PagedPoolHint
= i
;
968 // Release the lock protecting the bitmaps
970 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
973 // And finally return the number of pages freed
975 return NumberOfPages
;
979 // Get the first PTE and its corresponding PFN entry. If this is also the
980 // last PTE, meaning that this allocation was only for one page, push it into
981 // the S-LIST instead of freeing it
983 StartPte
= PointerPte
= MiAddressToPte(StartingVa
);
984 StartPfn
= Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
985 if ((Pfn1
->u3
.e1
.EndOfAllocation
== 1) &&
986 (ExQueryDepthSList(&MiNonPagedPoolSListHead
) < MiNonPagedPoolSListMaximum
))
988 InterlockedPushEntrySList(&MiNonPagedPoolSListHead
, StartingVa
);
993 // Loop until we find the last PTE
995 while (Pfn1
->u3
.e1
.EndOfAllocation
== 0)
1001 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
1005 // Now we know how many pages we have
1007 NumberOfPages
= (PFN_COUNT
)(PointerPte
- StartPte
+ 1);
1010 // Acquire the nonpaged pool lock
1012 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
1015 // Mark the first and last PTEs as not part of an allocation anymore
1017 StartPfn
->u3
.e1
.StartOfAllocation
= 0;
1018 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
1021 // Assume we will free as many pages as the allocation was
1023 FreePages
= NumberOfPages
;
1026 // Peek one page past the end of the allocation
1031 // Guard against going past initial nonpaged pool
1033 if (MiGetPfnEntryIndex(Pfn1
) == MiEndOfInitialPoolFrame
)
1036 // This page is on the outskirts of initial nonpaged pool, so ignore it
1043 ASSERT((ULONG_PTR
)StartingVa
+ NumberOfPages
<= (ULONG_PTR
)MmNonPagedPoolEnd
);
1045 /* Check if protected pool is enabled */
1046 if (MmProtectFreedNonPagedPool
)
1048 /* The freed block will be merged, it must be made accessible */
1049 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte
), 0);
1053 // Otherwise, our entire allocation must've fit within the initial non
1054 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
1055 // the next allocation
1057 if (PointerPte
->u
.Hard
.Valid
== 1)
1060 // It's either expansion or initial: get the PFN entry
1062 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
1067 // This means we've reached the guard page that protects the end of
1068 // the expansion nonpaged pool
1076 // Check if this allocation actually exists
1078 if ((Pfn1
) && (Pfn1
->u3
.e1
.StartOfAllocation
== 0))
1081 // It doesn't, so we should actually locate a free entry descriptor
1083 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)StartingVa
+
1084 (NumberOfPages
<< PAGE_SHIFT
));
1085 ASSERT(FreeEntry
->Signature
== MM_FREE_POOL_SIGNATURE
);
1086 ASSERT(FreeEntry
->Owner
== FreeEntry
);
1088 /* Consume this entry's pages */
1089 FreePages
+= FreeEntry
->Size
;
1091 /* Remove the item from the list, depending if pool is protected */
1092 if (MmProtectFreedNonPagedPool
)
1093 MiProtectedPoolRemoveEntryList(&FreeEntry
->List
);
1095 RemoveEntryList(&FreeEntry
->List
);
1099 // Now get the official free entry we'll create for the caller's allocation
1101 FreeEntry
= StartingVa
;
1104 // Check if the our allocation is the very first page
1106 if (MiGetPfnEntryIndex(StartPfn
) == MiStartOfInitialPoolFrame
)
1109 // Then we can't do anything or we'll risk underflowing
1116 // Otherwise, get the PTE for the page right before our allocation
1118 PointerPte
-= NumberOfPages
+ 1;
1120 /* Check if protected pool is enabled */
1121 if (MmProtectFreedNonPagedPool
)
1123 /* The freed block will be merged, it must be made accessible */
1124 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte
), 0);
1127 /* Check if this is valid pool, or a guard page */
1128 if (PointerPte
->u
.Hard
.Valid
== 1)
1131 // It's either expansion or initial nonpaged pool, get the PFN entry
1133 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
1138 // We must've reached the guard page, so don't risk touching it
1145 // Check if there is a valid PFN entry for the page before the allocation
1146 // and then check if this page was actually the end of an allocation.
1147 // If it wasn't, then we know for sure it's a free page
1149 if ((Pfn1
) && (Pfn1
->u3
.e1
.EndOfAllocation
== 0))
1152 // Get the free entry descriptor for that given page range
1154 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)StartingVa
- PAGE_SIZE
);
1155 ASSERT(FreeEntry
->Signature
== MM_FREE_POOL_SIGNATURE
);
1156 FreeEntry
= FreeEntry
->Owner
;
1158 /* Check if protected pool is enabled */
1159 if (MmProtectFreedNonPagedPool
)
1161 /* The freed block will be merged, it must be made accessible */
1162 MiUnProtectFreeNonPagedPool(FreeEntry
, 0);
1166 // Check if the entry is small enough to be indexed on a free list
1167 // If it is, we'll want to re-insert it, since we're about to
1168 // collapse our pages on top of it, which will change its count
1170 if (FreeEntry
->Size
< (MI_MAX_FREE_PAGE_LISTS
- 1))
1172 /* Remove the item from the list, depending if pool is protected */
1173 if (MmProtectFreedNonPagedPool
)
1174 MiProtectedPoolRemoveEntryList(&FreeEntry
->List
);
1176 RemoveEntryList(&FreeEntry
->List
);
1181 FreeEntry
->Size
+= FreePages
;
1184 // And now find the new appropriate list to place it in
1186 i
= (ULONG
)(FreeEntry
->Size
- 1);
1187 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
1189 /* Insert the entry into the free list head, check for prot. pool */
1190 if (MmProtectFreedNonPagedPool
)
1191 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
, TRUE
);
1193 InsertTailList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
1198 // Otherwise, just combine our free pages into this entry
1200 FreeEntry
->Size
+= FreePages
;
1205 // Check if we were unable to do any compaction, and we'll stick with this
1207 if (FreeEntry
== StartingVa
)
1210 // Well, now we are a free entry. At worse we just have our newly freed
1211 // pages, at best we have our pages plus whatever entry came after us
1213 FreeEntry
->Size
= FreePages
;
1216 // Find the appropriate list we should be on
1218 i
= FreeEntry
->Size
- 1;
1219 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
1221 /* Insert the entry into the free list head, check for prot. pool */
1222 if (MmProtectFreedNonPagedPool
)
1223 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
, TRUE
);
1225 InsertTailList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
1229 // Just a sanity check
1231 ASSERT(FreePages
!= 0);
1234 // Get all the pages between our allocation and its end. These will all now
1235 // become free page chunks.
1237 NextEntry
= StartingVa
;
1238 LastEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)NextEntry
+ (FreePages
<< PAGE_SHIFT
));
1242 // Link back to the parent free entry, and keep going
1244 NextEntry
->Owner
= FreeEntry
;
1245 NextEntry
->Signature
= MM_FREE_POOL_SIGNATURE
;
1246 NextEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)NextEntry
+ PAGE_SIZE
);
1247 } while (NextEntry
!= LastEntry
);
1249 /* Is freed non paged pool protected? */
1250 if (MmProtectFreedNonPagedPool
)
1252 /* Protect the freed pool! */
1253 MiProtectFreeNonPagedPool(FreeEntry
, FreeEntry
->Size
);
1257 // We're done, release the lock and let the caller know how much we freed
1259 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
1260 return NumberOfPages
;
1266 MiRaisePoolQuota(IN POOL_TYPE PoolType
,
1267 IN ULONG CurrentMaxQuota
,
1268 OUT PULONG NewMaxQuota
)
1274 *NewMaxQuota
= CurrentMaxQuota
+ 65536;
1280 MiInitializeSessionPool(VOID
)
1282 PMMPTE PointerPte
, LastPte
;
1283 PMMPDE PointerPde
, LastPde
;
1284 PFN_NUMBER PageFrameIndex
, PdeCount
;
1285 PPOOL_DESCRIPTOR PoolDescriptor
;
1286 PMM_SESSION_SPACE SessionGlobal
;
1287 PMM_PAGED_POOL_INFO PagedPoolInfo
;
1289 ULONG Index
, PoolSize
, BitmapSize
;
1292 /* Lock session pool */
1293 SessionGlobal
= MmSessionSpace
->GlobalVirtualAddress
;
1294 KeInitializeGuardedMutex(&SessionGlobal
->PagedPoolMutex
);
1296 /* Setup a valid pool descriptor */
1297 PoolDescriptor
= &MmSessionSpace
->PagedPool
;
1298 ExInitializePoolDescriptor(PoolDescriptor
,
1302 &SessionGlobal
->PagedPoolMutex
);
1304 /* Setup the pool addresses */
1305 MmSessionSpace
->PagedPoolStart
= (PVOID
)MiSessionPoolStart
;
1306 MmSessionSpace
->PagedPoolEnd
= (PVOID
)((ULONG_PTR
)MiSessionPoolEnd
- 1);
1307 DPRINT1("Session Pool Start: 0x%p End: 0x%p\n",
1308 MmSessionSpace
->PagedPoolStart
, MmSessionSpace
->PagedPoolEnd
);
1310 /* Reset all the counters */
1311 PagedPoolInfo
= &MmSessionSpace
->PagedPoolInfo
;
1312 PagedPoolInfo
->PagedPoolCommit
= 0;
1313 PagedPoolInfo
->PagedPoolHint
= 0;
1314 PagedPoolInfo
->AllocatedPagedPool
= 0;
1316 /* Compute PDE and PTE addresses */
1317 PointerPde
= MiAddressToPde(MmSessionSpace
->PagedPoolStart
);
1318 PointerPte
= MiAddressToPte(MmSessionSpace
->PagedPoolStart
);
1319 LastPde
= MiAddressToPde(MmSessionSpace
->PagedPoolEnd
);
1320 LastPte
= MiAddressToPte(MmSessionSpace
->PagedPoolEnd
);
1322 /* Write them down */
1323 MmSessionSpace
->PagedPoolBasePde
= PointerPde
;
1324 PagedPoolInfo
->FirstPteForPagedPool
= PointerPte
;
1325 PagedPoolInfo
->LastPteForPagedPool
= LastPte
;
1326 PagedPoolInfo
->NextPdeForPagedPoolExpansion
= PointerPde
+ 1;
1329 PdeCount
= LastPde
- PointerPde
;
1330 RtlZeroMemory(PointerPde
, (PdeCount
+ 1) * sizeof(MMPTE
));
1332 /* Initialize the PFN for the PDE */
1333 Status
= MiInitializeAndChargePfn(&PageFrameIndex
,
1335 MmSessionSpace
->SessionPageDirectoryIndex
,
1337 ASSERT(NT_SUCCESS(Status
) == TRUE
);
1339 /* Initialize the first page table */
1340 Index
= (ULONG_PTR
)MmSessionSpace
->PagedPoolStart
- (ULONG_PTR
)MmSessionBase
;
1342 #ifndef _M_AMD64 // FIXME
1343 ASSERT(MmSessionSpace
->PageTables
[Index
].u
.Long
== 0);
1344 MmSessionSpace
->PageTables
[Index
] = *PointerPde
;
1347 /* Bump up counters */
1348 InterlockedIncrementSizeT(&MmSessionSpace
->NonPageablePages
);
1349 InterlockedIncrementSizeT(&MmSessionSpace
->CommittedPages
);
1351 /* Compute the size of the pool in pages, and of the bitmap for it */
1352 PoolSize
= MmSessionPoolSize
>> PAGE_SHIFT
;
1353 BitmapSize
= sizeof(RTL_BITMAP
) + ((PoolSize
+ 31) / 32) * sizeof(ULONG
);
1355 /* Allocate and initialize the bitmap to track allocations */
1356 PagedPoolInfo
->PagedPoolAllocationMap
= ExAllocatePoolWithTag(NonPagedPool
,
1359 ASSERT(PagedPoolInfo
->PagedPoolAllocationMap
!= NULL
);
1360 RtlInitializeBitMap(PagedPoolInfo
->PagedPoolAllocationMap
,
1361 (PULONG
)(PagedPoolInfo
->PagedPoolAllocationMap
+ 1),
1364 /* Set all bits, but clear the first page table's worth */
1365 RtlSetAllBits(PagedPoolInfo
->PagedPoolAllocationMap
);
1366 RtlClearBits(PagedPoolInfo
->PagedPoolAllocationMap
, 0, PTE_PER_PAGE
);
1368 /* Allocate and initialize the bitmap to track free space */
1369 PagedPoolInfo
->EndOfPagedPoolBitmap
= ExAllocatePoolWithTag(NonPagedPool
,
1372 ASSERT(PagedPoolInfo
->EndOfPagedPoolBitmap
!= NULL
);
1373 RtlInitializeBitMap(PagedPoolInfo
->EndOfPagedPoolBitmap
,
1374 (PULONG
)(PagedPoolInfo
->EndOfPagedPoolBitmap
+ 1),
1377 /* Clear all the bits and return success */
1378 RtlClearAllBits(PagedPoolInfo
->EndOfPagedPoolBitmap
);
1379 return STATUS_SUCCESS
;
1382 /* PUBLIC FUNCTIONS ***********************************************************/
1389 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes
,
1401 MmFreeMappingAddress(IN PVOID BaseAddress
,