/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
9 /* INCLUDES *******************************************************************/
#line 15 "ARM³::POOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
19 /* GLOBALS ********************************************************************/
21 LIST_ENTRY MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
];
22 PFN_NUMBER MmNumberOfFreeNonPagedPool
, MiExpansionPoolPagesInitialCharge
;
23 PVOID MmNonPagedPoolEnd0
;
24 PFN_NUMBER MiStartOfInitialPoolFrame
, MiEndOfInitialPoolFrame
;
25 KGUARDED_MUTEX MmPagedPoolMutex
;
26 MM_PAGED_POOL_INFO MmPagedPoolInfo
;
27 SIZE_T MmAllocatedNonPagedPool
;
28 ULONG MmSpecialPoolTag
;
30 /* PRIVATE FUNCTIONS **********************************************************/
34 MiInitializeNonPagedPoolThresholds(VOID
)
36 PFN_NUMBER Size
= MmMaximumNonPagedPoolInPages
;
38 /* Default low threshold of 8MB or one third of nonpaged pool */
39 MiLowNonPagedPoolThreshold
= (8 * _1MB
) >> PAGE_SHIFT
;
40 MiLowNonPagedPoolThreshold
= min(MiLowNonPagedPoolThreshold
, Size
/ 3);
42 /* Default high threshold of 20MB or 50% */
43 MiHighNonPagedPoolThreshold
= (20 * _1MB
) >> PAGE_SHIFT
;
44 MiHighNonPagedPoolThreshold
= min(MiHighNonPagedPoolThreshold
, Size
/ 2);
45 ASSERT(MiLowNonPagedPoolThreshold
< MiHighNonPagedPoolThreshold
);
50 MiInitializePoolEvents(VOID
)
53 PFN_NUMBER FreePoolInPages
;
56 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
58 /* Total size of the paged pool minus the allocated size, is free */
59 FreePoolInPages
= MmSizeOfPagedPoolInPages
- MmPagedPoolInfo
.AllocatedPagedPool
;
61 /* Check the initial state high state */
62 if (FreePoolInPages
>= MiHighPagedPoolThreshold
)
64 /* We have plenty of pool */
65 KeSetEvent(MiHighPagedPoolEvent
, 0, FALSE
);
70 KeClearEvent(MiHighPagedPoolEvent
);
73 /* Check the initial low state */
74 if (FreePoolInPages
<= MiLowPagedPoolThreshold
)
76 /* We're very low in free pool memory */
77 KeSetEvent(MiLowPagedPoolEvent
, 0, FALSE
);
82 KeClearEvent(MiLowPagedPoolEvent
);
85 /* Release the paged pool lock */
86 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
88 /* Now it's time for the nonpaged pool lock */
89 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
91 /* Free pages are the maximum minus what's been allocated */
92 FreePoolInPages
= MmMaximumNonPagedPoolInPages
- MmAllocatedNonPagedPool
;
94 /* Check if we have plenty */
95 if (FreePoolInPages
>= MiHighNonPagedPoolThreshold
)
97 /* We do, set the event */
98 KeSetEvent(MiHighNonPagedPoolEvent
, 0, FALSE
);
102 /* We don't, clear the event */
103 KeClearEvent(MiHighNonPagedPoolEvent
);
106 /* Check if we have very little */
107 if (FreePoolInPages
<= MiLowNonPagedPoolThreshold
)
109 /* We do, set the event */
110 KeSetEvent(MiLowNonPagedPoolEvent
, 0, FALSE
);
114 /* We don't, clear it */
115 KeClearEvent(MiLowNonPagedPoolEvent
);
118 /* We're done, release the nonpaged pool lock */
119 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
124 MiInitializeNonPagedPool(VOID
)
127 PFN_NUMBER PoolPages
;
128 PMMFREE_POOL_ENTRY FreeEntry
, FirstEntry
;
133 // We keep 4 lists of free pages (4 lists help avoid contention)
135 for (i
= 0; i
< MI_MAX_FREE_PAGE_LISTS
; i
++)
138 // Initialize each of them
140 InitializeListHead(&MmNonPagedPoolFreeListHead
[i
]);
144 // Calculate how many pages the initial nonpaged pool has
146 PoolPages
= BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes
);
147 MmNumberOfFreeNonPagedPool
= PoolPages
;
150 // Initialize the first free entry
152 FreeEntry
= MmNonPagedPoolStart
;
153 FirstEntry
= FreeEntry
;
154 FreeEntry
->Size
= PoolPages
;
155 FreeEntry
->Owner
= FirstEntry
;
158 // Insert it into the last list
160 InsertHeadList(&MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
- 1],
164 // Now create free entries for every single other page
166 while (PoolPages
-- > 1)
169 // Link them all back to the original entry
171 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)FreeEntry
+ PAGE_SIZE
);
172 FreeEntry
->Owner
= FirstEntry
;
176 // Validate and remember first allocated pool page
178 PointerPte
= MiAddressToPte(MmNonPagedPoolStart
);
179 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
180 MiStartOfInitialPoolFrame
= PFN_FROM_PTE(PointerPte
);
183 // Keep track of where initial nonpaged pool ends
185 MmNonPagedPoolEnd0
= (PVOID
)((ULONG_PTR
)MmNonPagedPoolStart
+
186 MmSizeOfNonPagedPoolInBytes
);
189 // Validate and remember last allocated pool page
191 PointerPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)MmNonPagedPoolEnd0
- 1));
192 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
193 MiEndOfInitialPoolFrame
= PFN_FROM_PTE(PointerPte
);
196 // Validate the first nonpaged pool expansion page (which is a guard page)
198 PointerPte
= MiAddressToPte(MmNonPagedPoolExpansionStart
);
199 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
202 // Calculate the size of the expansion region alone
204 MiExpansionPoolPagesInitialCharge
=
205 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes
- MmSizeOfNonPagedPoolInBytes
);
208 // Remove 2 pages, since there's a guard page on top and on the bottom
210 MiExpansionPoolPagesInitialCharge
-= 2;
213 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
214 // guard page on top so make sure to skip it. The bottom guard page will be
215 // guaranteed by the fact our size is off by one.
217 MiInitializeSystemPtes(PointerPte
+ 1,
218 MiExpansionPoolPagesInitialCharge
,
219 NonPagedPoolExpansion
);
224 MiAllocatePoolPages(IN POOL_TYPE PoolType
,
225 IN SIZE_T SizeInBytes
)
227 PFN_NUMBER SizeInPages
, PageFrameNumber
;
230 PLIST_ENTRY NextEntry
, NextHead
, LastHead
;
231 PMMPTE PointerPte
, StartPte
;
234 PVOID BaseVa
, BaseVaStart
;
235 PMMFREE_POOL_ENTRY FreeEntry
;
236 PKSPIN_LOCK_QUEUE LockQueue
;
239 // Figure out how big the allocation is in pages
241 SizeInPages
= BYTES_TO_PAGES(SizeInBytes
);
246 if (PoolType
== PagedPool
)
249 // Lock the paged pool mutex
251 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
254 // Find some empty allocation space
256 i
= RtlFindClearBitsAndSet(MmPagedPoolInfo
.PagedPoolAllocationMap
,
258 MmPagedPoolInfo
.PagedPoolHint
);
262 // Get the page bit count
264 i
= ((SizeInPages
- 1) / 1024) + 1;
267 // Check if there is enougn paged pool expansion space left
269 if (MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
>
270 MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
))
275 DPRINT1("OUT OF PAGED POOL!!!\n");
276 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
281 // Check if we'll have to expand past the last PTE we have available
283 if (((i
- 1) + MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
) >
284 MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
))
287 // We can only support this much then
289 SizeInPages
= MiAddressToPte(MmPagedPoolInfo
.LastPteForPagedPool
) -
290 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
+
292 ASSERT(SizeInPages
< i
);
298 // Otherwise, there is plenty of space left for this expansion
304 // Get the template PTE we'll use to expand
306 TempPte
= ValidKernelPte
;
309 // Get the first PTE in expansion space
311 PointerPte
= MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
;
312 BaseVa
= MiPteToAddress(PointerPte
);
313 BaseVaStart
= BaseVa
;
316 // Lock the PFN database and loop pages
318 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
322 // It should not already be valid
324 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
327 // Request a paged pool page and write the PFN for it
329 PageFrameNumber
= MmAllocPage(MC_PPOOL
);
330 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
333 // Save it into our double-buffered system page directory
335 MmSystemPagePtes
[(ULONG_PTR
)PointerPte
& (PAGE_SIZE
- 1) /
336 sizeof(MMPTE
)] = TempPte
;
339 // Write the actual PTE now
341 *PointerPte
++ = TempPte
;
344 // Move on to the next expansion address
346 BaseVa
= (PVOID
)((ULONG_PTR
)BaseVa
+ PAGE_SIZE
);
350 // Release the PFN database lock
352 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
355 // These pages are now available, clear their availablity bits
357 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
,
358 (MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
-
359 MiAddressToPte(MmPagedPoolInfo
.FirstPteForPagedPool
)) *
364 // Update the next expansion location
366 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
+= SizeInPages
;
369 // Zero out the newly available memory
371 RtlZeroMemory(BaseVaStart
, SizeInPages
* PAGE_SIZE
);
374 // Now try consuming the pages again
376 SizeInPages
= BYTES_TO_PAGES(SizeInBytes
);
377 i
= RtlFindClearBitsAndSet(MmPagedPoolInfo
.PagedPoolAllocationMap
,
385 DPRINT1("OUT OF PAGED POOL!!!\n");
386 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
392 // Update the pool hint if the request was just one page
394 if (SizeInPages
== 1) MmPagedPoolInfo
.PagedPoolHint
= i
+ 1;
397 // Update the end bitmap so we know the bounds of this allocation when
398 // the time comes to free it
400 RtlSetBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, i
+ SizeInPages
- 1);
403 // Now we can release the lock (it mainly protects the bitmap)
405 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
408 // Now figure out where this allocation starts
410 BaseVa
= (PVOID
)((ULONG_PTR
)MmPagedPoolStart
+ (i
<< PAGE_SHIFT
));
415 KeFlushEntireTb(TRUE
, TRUE
);
418 // Setup a demand-zero writable PTE
421 MI_MAKE_WRITE_PAGE(&TempPte
);
424 // Find the first and last PTE, then loop them all
426 PointerPte
= MiAddressToPte(BaseVa
);
427 StartPte
= PointerPte
+ SizeInPages
;
431 // Write the demand zero PTE and keep going
433 *PointerPte
++ = TempPte
;
434 } while (PointerPte
< StartPte
);
437 // Return the allocation address to the caller
443 // Allocations of less than 4 pages go into their individual buckets
446 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
449 // Loop through all the free page lists based on the page index
451 NextHead
= &MmNonPagedPoolFreeListHead
[i
];
452 LastHead
= &MmNonPagedPoolFreeListHead
[MI_MAX_FREE_PAGE_LISTS
];
455 // Acquire the nonpaged pool lock
457 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
461 // Now loop through all the free page entries in this given list
463 NextEntry
= NextHead
->Flink
;
464 while (NextEntry
!= NextHead
)
467 // Grab the entry and see if it can handle our allocation
469 FreeEntry
= CONTAINING_RECORD(NextEntry
, MMFREE_POOL_ENTRY
, List
);
470 if (FreeEntry
->Size
>= SizeInPages
)
473 // It does, so consume the pages from here
475 FreeEntry
->Size
-= SizeInPages
;
478 // The allocation will begin in this free page area
480 BaseVa
= (PVOID
)((ULONG_PTR
)FreeEntry
+
481 (FreeEntry
->Size
<< PAGE_SHIFT
));
484 // This is not a free page segment anymore
486 RemoveEntryList(&FreeEntry
->List
);
489 // However, check if its' still got space left
491 if (FreeEntry
->Size
!= 0)
494 // Insert it back into a different list, based on its pages
496 i
= FreeEntry
->Size
- 1;
497 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
498 InsertTailList (&MmNonPagedPoolFreeListHead
[i
],
503 // Grab the PTE for this allocation
505 PointerPte
= MiAddressToPte(BaseVa
);
506 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
509 // Grab the PFN NextEntry and index
511 Pfn1
= MiGetPfnEntry(PFN_FROM_PTE(PointerPte
));
514 // Now mark it as the beginning of an allocation
516 ASSERT(Pfn1
->u3
.e1
.StartOfAllocation
== 0);
517 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
520 // Check if the allocation is larger than one page
522 if (SizeInPages
!= 1)
525 // Navigate to the last PFN entry and PTE
527 PointerPte
+= SizeInPages
- 1;
528 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
529 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
533 // Mark this PFN as the last (might be the same as the first)
535 ASSERT(Pfn1
->u3
.e1
.EndOfAllocation
== 0);
536 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
539 // Release the nonpaged pool lock, and return the allocation
541 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
546 // Try the next free page entry
548 NextEntry
= FreeEntry
->List
.Flink
;
550 } while (++NextHead
< LastHead
);
553 // If we got here, we're out of space.
554 // Start by releasing the lock
556 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
559 // Allocate some system PTEs
561 StartPte
= MiReserveSystemPtes(SizeInPages
, NonPagedPoolExpansion
);
562 PointerPte
= StartPte
;
563 if (StartPte
== NULL
)
568 DPRINT1("Out of NP Expansion Pool\n");
573 // Acquire the pool lock now
575 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
578 // Lock the PFN database too
580 LockQueue
= &KeGetCurrentPrcb()->LockQueue
[LockQueuePfnLock
];
581 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue
);
586 TempPte
= ValidKernelPte
;
592 PageFrameNumber
= MmAllocPage(MC_NPPOOL
);
595 // Get the PFN entry for it
597 Pfn1
= MiGetPfnEntry(PageFrameNumber
);
600 // Write the PTE for it
602 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
603 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
604 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
605 *PointerPte
++ = TempPte
;
606 } while (--SizeInPages
> 0);
609 // This is the last page
611 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
614 // Get the first page and mark it as such
616 Pfn1
= MiGetPfnEntry(StartPte
->u
.Hard
.PageFrameNumber
);
617 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
620 // Release the PFN and nonpaged pool lock
622 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue
);
623 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
626 // Return the address
628 return MiPteToAddress(StartPte
);
633 MiFreePoolPages(IN PVOID StartingVa
)
635 PMMPTE PointerPte
, StartPte
;
636 PMMPFN Pfn1
, StartPfn
;
637 PFN_NUMBER FreePages
, NumberOfPages
;
639 PMMFREE_POOL_ENTRY FreeEntry
, NextEntry
, LastEntry
;
645 if ((StartingVa
>= MmPagedPoolStart
) && (StartingVa
<= MmPagedPoolEnd
))
648 // Calculate the offset from the beginning of paged pool, and convert it
651 i
= ((ULONG_PTR
)StartingVa
- (ULONG_PTR
)MmPagedPoolStart
) >> PAGE_SHIFT
;
655 // Now use the end bitmap to scan until we find a set bit, meaning that
656 // this allocation finishes here
658 while (!RtlTestBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, End
)) End
++;
661 // Now calculate the total number of pages this allocation spans
663 NumberOfPages
= End
- i
+ 1;
666 // Acquire the paged pool lock
668 KeAcquireGuardedMutex(&MmPagedPoolMutex
);
671 // Clear the allocation and free bits
673 RtlClearBit(MmPagedPoolInfo
.EndOfPagedPoolBitmap
, i
);
674 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, i
, NumberOfPages
);
677 // Update the hint if we need to
679 if (i
< MmPagedPoolInfo
.PagedPoolHint
) MmPagedPoolInfo
.PagedPoolHint
= i
;
682 // Release the lock protecting the bitmaps
684 KeReleaseGuardedMutex(&MmPagedPoolMutex
);
687 // And finally return the number of pages freed
689 return NumberOfPages
;
693 // Get the first PTE and its corresponding PFN entry
695 StartPte
= PointerPte
= MiAddressToPte(StartingVa
);
696 StartPfn
= Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
699 // Loop until we find the last PTE
701 while (Pfn1
->u3
.e1
.EndOfAllocation
== 0)
707 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
711 // Now we know how many pages we have
713 NumberOfPages
= PointerPte
- StartPte
+ 1;
716 // Acquire the nonpaged pool lock
718 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock
);
721 // Mark the first and last PTEs as not part of an allocation anymore
723 StartPfn
->u3
.e1
.StartOfAllocation
= 0;
724 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
727 // Assume we will free as many pages as the allocation was
729 FreePages
= NumberOfPages
;
732 // Peek one page past the end of the allocation
737 // Guard against going past initial nonpaged pool
739 if (MiGetPfnEntryIndex(Pfn1
) == MiEndOfInitialPoolFrame
)
742 // This page is on the outskirts of initial nonpaged pool, so ignore it
749 // Otherwise, our entire allocation must've fit within the initial non
750 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
751 // the next allocation
753 ASSERT((ULONG_PTR
)StartingVa
+ NumberOfPages
<= (ULONG_PTR
)MmNonPagedPoolEnd
);
754 if (PointerPte
->u
.Hard
.Valid
== 1)
757 // It's either expansion or initial: get the PFN entry
759 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
764 // This means we've reached the guard page that protects the end of
765 // the expansion nonpaged pool
773 // Check if this allocation actually exists
775 if ((Pfn1
) && (Pfn1
->u3
.e1
.StartOfAllocation
== 0))
778 // It doesn't, so we should actually locate a free entry descriptor
780 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)StartingVa
+
781 (NumberOfPages
<< PAGE_SHIFT
));
782 ASSERT(FreeEntry
->Owner
== FreeEntry
);
785 // Consume this entry's pages, and remove it from its free list
787 FreePages
+= FreeEntry
->Size
;
788 RemoveEntryList (&FreeEntry
->List
);
792 // Now get the official free entry we'll create for the caller's allocation
794 FreeEntry
= StartingVa
;
797 // Check if the our allocation is the very first page
799 if (MiGetPfnEntryIndex(StartPfn
) == MiStartOfInitialPoolFrame
)
802 // Then we can't do anything or we'll risk underflowing
809 // Otherwise, get the PTE for the page right before our allocation
811 PointerPte
-= NumberOfPages
+ 1;
812 if (PointerPte
->u
.Hard
.Valid
== 1)
815 // It's either expansion or initial nonpaged pool, get the PFN entry
817 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
822 // We must've reached the guard page, so don't risk touching it
829 // Check if there is a valid PFN entry for the page before the allocation
830 // and then check if this page was actually the end of an allocation.
831 // If it wasn't, then we know for sure it's a free page
833 if ((Pfn1
) && (Pfn1
->u3
.e1
.EndOfAllocation
== 0))
836 // Get the free entry descriptor for that given page range
838 FreeEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)StartingVa
- PAGE_SIZE
);
839 FreeEntry
= FreeEntry
->Owner
;
842 // Check if the entry is small enough to be indexed on a free list
843 // If it is, we'll want to re-insert it, since we're about to
844 // collapse our pages on top of it, which will change its count
846 if (FreeEntry
->Size
< (MI_MAX_FREE_PAGE_LISTS
- 1))
849 // Remove the list from where it is now
851 RemoveEntryList(&FreeEntry
->List
);
856 FreeEntry
->Size
+= FreePages
;
859 // And now find the new appropriate list to place it in
861 i
= (ULONG
)(FreeEntry
->Size
- 1);
862 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
867 InsertTailList(&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
872 // Otherwise, just combine our free pages into this entry
874 FreeEntry
->Size
+= FreePages
;
879 // Check if we were unable to do any compaction, and we'll stick with this
881 if (FreeEntry
== StartingVa
)
884 // Well, now we are a free entry. At worse we just have our newly freed
885 // pages, at best we have our pages plus whatever entry came after us
887 FreeEntry
->Size
= FreePages
;
890 // Find the appropriate list we should be on
892 i
= FreeEntry
->Size
- 1;
893 if (i
>= MI_MAX_FREE_PAGE_LISTS
) i
= MI_MAX_FREE_PAGE_LISTS
- 1;
898 InsertTailList (&MmNonPagedPoolFreeListHead
[i
], &FreeEntry
->List
);
902 // Just a sanity check
904 ASSERT(FreePages
!= 0);
907 // Get all the pages between our allocation and its end. These will all now
908 // become free page chunks.
910 NextEntry
= StartingVa
;
911 LastEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)NextEntry
+ (FreePages
<< PAGE_SHIFT
));
915 // Link back to the parent free entry, and keep going
917 NextEntry
->Owner
= FreeEntry
;
918 NextEntry
= (PMMFREE_POOL_ENTRY
)((ULONG_PTR
)NextEntry
+ PAGE_SIZE
);
919 } while (NextEntry
!= LastEntry
);
922 // We're done, release the lock and let the caller know how much we freed
924 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock
, OldIrql
);
925 return NumberOfPages
;
931 MiRaisePoolQuota(IN POOL_TYPE PoolType
,
932 IN ULONG CurrentMaxQuota
,
933 OUT PULONG NewMaxQuota
)
939 *NewMaxQuota
= CurrentMaxQuota
+ 65536;
943 /* PUBLIC FUNCTIONS ***********************************************************/
950 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes
,
962 MmFreeMappingAddress(IN PVOID BaseAddress
,