/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/contmem.c
 * PURPOSE:         ARM Memory Manager Contiguous Memory Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::CONTMEM"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
/* PRIVATE FUNCTIONS **********************************************************/
23 MiFindContiguousPages(IN PFN_NUMBER LowestPfn
,
24 IN PFN_NUMBER HighestPfn
,
25 IN PFN_NUMBER BoundaryPfn
,
26 IN PFN_NUMBER SizeInPages
,
27 IN MEMORY_CACHING_TYPE CacheType
)
29 PFN_NUMBER Page
, PageCount
, LastPage
, Length
, BoundaryMask
;
34 ASSERT(SizeInPages
!= 0);
37 // Convert the boundary PFN into an alignment mask
39 BoundaryMask
= ~(BoundaryPfn
- 1);
42 // Loop all the physical memory blocks
47 // Capture the base page and length of this memory block
49 Page
= MmPhysicalMemoryBlock
->Run
[i
].BasePage
;
50 PageCount
= MmPhysicalMemoryBlock
->Run
[i
].PageCount
;
53 // Check how far this memory block will go
55 LastPage
= Page
+ PageCount
;
58 // Trim it down to only the PFNs we're actually interested in
60 if ((LastPage
- 1) > HighestPfn
) LastPage
= HighestPfn
+ 1;
61 if (Page
< LowestPfn
) Page
= LowestPfn
;
64 // Skip this run if it's empty or fails to contain all the pages we need
66 if (!(PageCount
) || ((Page
+ SizeInPages
) > LastPage
)) continue;
69 // Now scan all the relevant PFNs in this run
72 for (Pfn1
= MiGetPfnEntry(Page
); Page
< LastPage
; Page
++, Pfn1
++)
75 // If this PFN is in use, ignore it
77 if (MiIsPfnInUse(Pfn1
)) continue;
80 // If we haven't chosen a start PFN yet and the caller specified an
81 // alignment, make sure the page matches the alignment restriction
83 if ((!(Length
) && (BoundaryPfn
)) &&
84 (((Page
^ (Page
+ SizeInPages
- 1)) & BoundaryMask
)))
87 // It does not, so bail out
93 // Increase the number of valid pages, and check if we have enough
95 if (++Length
== SizeInPages
)
98 // It appears we've amassed enough legitimate pages, rollback
100 Pfn1
-= (Length
- 1);
101 Page
-= (Length
- 1);
104 // Acquire the PFN lock
106 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
110 // Things might've changed for us. Is the page still free?
112 if (MiIsPfnInUse(Pfn1
)) break;
115 // So far so good. Is this the last confirmed valid page?
120 // Sanity check that we didn't go out of bounds
122 ASSERT(i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
125 // Loop until all PFN entries have been processed
127 EndPfn
= Pfn1
- SizeInPages
+ 1;
131 // This PFN is now a used page, set it up
133 MiUnlinkFreeOrZeroedPage(Pfn1
);
134 Pfn1
->u3
.e2
.ReferenceCount
= 1;
135 Pfn1
->u2
.ShareCount
= 1;
136 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
137 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
138 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
139 Pfn1
->u3
.e1
.PrototypePte
= 0;
140 Pfn1
->u4
.VerifierAllocation
= 0;
141 Pfn1
->PteAddress
= (PVOID
)0xBAADF00D;
144 // Check if this is the last PFN, otherwise go on
146 if (Pfn1
== EndPfn
) break;
151 // Mark the first and last PFN so we can find them later
153 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
154 (Pfn1
+ SizeInPages
- 1)->u3
.e1
.EndOfAllocation
= 1;
157 // Now it's safe to let go of the PFN lock
159 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
162 // Quick sanity check that the last PFN is consistent
164 EndPfn
= Pfn1
+ SizeInPages
;
165 ASSERT(EndPfn
== MiGetPfnEntry(Page
+ 1));
168 // Compute the first page, and make sure it's consistent
170 Page
-= SizeInPages
- 1;
171 ASSERT(Pfn1
== MiGetPfnEntry(Page
));
177 // Keep going. The purpose of this loop is to reconfirm that
178 // after acquiring the PFN lock these pages are still usable
185 // If we got here, something changed while we hadn't acquired
186 // the PFN lock yet, so we'll have to restart
188 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
192 } while (++i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
195 // And if we get here, it means no suitable physical memory runs were found
202 MiCheckForContiguousMemory(IN PVOID BaseAddress
,
203 IN PFN_NUMBER BaseAddressPages
,
204 IN PFN_NUMBER SizeInPages
,
205 IN PFN_NUMBER LowestPfn
,
206 IN PFN_NUMBER HighestPfn
,
207 IN PFN_NUMBER BoundaryPfn
,
208 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
)
210 PMMPTE StartPte
, EndPte
;
211 PFN_NUMBER PreviousPage
= 0, Page
, HighPage
, BoundaryMask
, Pages
= 0;
214 // Okay, first of all check if the PFNs match our restrictions
216 if (LowestPfn
> HighestPfn
) return NULL
;
217 if (LowestPfn
+ SizeInPages
<= LowestPfn
) return NULL
;
218 if (LowestPfn
+ SizeInPages
- 1 > HighestPfn
) return NULL
;
219 if (BaseAddressPages
< SizeInPages
) return NULL
;
222 // This is the last page we need to get to and the boundary requested
224 HighPage
= HighestPfn
+ 1 - SizeInPages
;
225 BoundaryMask
= ~(BoundaryPfn
- 1);
228 // And here's the PTEs for this allocation. Let's go scan them.
230 StartPte
= MiAddressToPte(BaseAddress
);
231 EndPte
= StartPte
+ BaseAddressPages
;
232 while (StartPte
< EndPte
)
235 // Get this PTE's page number
237 ASSERT (StartPte
->u
.Hard
.Valid
== 1);
238 Page
= PFN_FROM_PTE(StartPte
);
241 // Is this the beginning of our adventure?
246 // Check if this PFN is within our range
248 if ((Page
>= LowestPfn
) && (Page
<= HighPage
))
251 // It is! Do you care about boundary (alignment)?
253 if (!(BoundaryPfn
) ||
254 (!((Page
^ (Page
+ SizeInPages
- 1)) & BoundaryMask
)))
257 // You don't care, or you do care but we deliver
264 // Have we found all the pages we need by now?
265 // Incidently, this means you only wanted one page
267 if (Pages
== SizeInPages
)
272 return MiPteToAddress(StartPte
);
278 // Have we found a page that doesn't seem to be contiguous?
280 if (Page
!= (PreviousPage
+ 1))
283 // Ah crap, we have to start over
290 // Otherwise, we're still in the game. Do we have all our pages?
292 if (++Pages
== SizeInPages
)
295 // We do! This entire range was contiguous, so we'll return it!
297 return MiPteToAddress(StartPte
- Pages
+ 1);
302 // Try with the next PTE, remember this PFN
310 // All good returns are within the loop...
317 MiFindContiguousMemory(IN PFN_NUMBER LowestPfn
,
318 IN PFN_NUMBER HighestPfn
,
319 IN PFN_NUMBER BoundaryPfn
,
320 IN PFN_NUMBER SizeInPages
,
321 IN MEMORY_CACHING_TYPE CacheType
)
324 PHYSICAL_ADDRESS PhysicalAddress
;
329 ASSERT(SizeInPages
!= 0);
332 // Our last hope is to scan the free page list for contiguous pages
334 Page
= MiFindContiguousPages(LowestPfn
,
339 if (!Page
) return NULL
;
342 // We'll just piggyback on the I/O memory mapper
344 PhysicalAddress
.QuadPart
= Page
<< PAGE_SHIFT
;
345 BaseAddress
= MmMapIoSpace(PhysicalAddress
, SizeInPages
<< PAGE_SHIFT
, CacheType
);
348 /* Loop the PFN entries */
349 Pfn1
= MiGetPfnEntry(Page
);
350 EndPfn
= Pfn1
+ SizeInPages
;
351 PointerPte
= MiAddressToPte(BaseAddress
);
354 /* Write the PTE address */
355 Pfn1
->PteAddress
= PointerPte
;
356 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(MiAddressToPte(PointerPte
++));
357 } while (Pfn1
++ < EndPfn
);
359 /* Return the address */
365 MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes
,
366 IN PFN_NUMBER LowestAcceptablePfn
,
367 IN PFN_NUMBER HighestAcceptablePfn
,
368 IN PFN_NUMBER BoundaryPfn
,
369 IN MEMORY_CACHING_TYPE CacheType
)
372 PFN_NUMBER SizeInPages
;
373 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
376 // Verify count and cache type
378 ASSERT(NumberOfBytes
!= 0);
379 ASSERT(CacheType
<= MmWriteCombined
);
382 // Compute size requested
384 SizeInPages
= BYTES_TO_PAGES(NumberOfBytes
);
387 // Convert the cache attribute and check for cached requests
389 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
390 if (CacheAttribute
== MiCached
)
393 // Because initial nonpaged pool is supposed to be contiguous, go ahead
394 // and try making a nonpaged pool allocation first.
396 BaseAddress
= ExAllocatePoolWithTag(NonPagedPoolCacheAligned
,
402 // Now make sure it's actually contiguous (if it came from expansion
405 if (MiCheckForContiguousMemory(BaseAddress
,
409 HighestAcceptablePfn
,
414 // Sweet, we're in business!
422 ExFreePool(BaseAddress
);
427 // According to MSDN, the system won't try anything else if you're higher
430 if (KeGetCurrentIrql() > APC_LEVEL
) return NULL
;
433 // Otherwise, we'll go try to find some
435 return MiFindContiguousMemory(LowestAcceptablePfn
,
436 HighestAcceptablePfn
,
444 MiFreeContiguousMemory(IN PVOID BaseAddress
)
447 PFN_NUMBER PageFrameIndex
, LastPage
, PageCount
;
448 PMMPFN Pfn1
, StartPfn
;
453 // First, check if the memory came from initial nonpaged pool, or expansion
455 if (((BaseAddress
>= MmNonPagedPoolStart
) &&
456 (BaseAddress
< (PVOID
)((ULONG_PTR
)MmNonPagedPoolStart
+
457 MmSizeOfNonPagedPoolInBytes
))) ||
458 ((BaseAddress
>= MmNonPagedPoolExpansionStart
) &&
459 (BaseAddress
< MmNonPagedPoolEnd
)))
462 // It did, so just use the pool to free this
464 ExFreePool(BaseAddress
);
468 /* Get the PTE and frame number for the allocation*/
469 PointerPte
= MiAddressToPte(BaseAddress
);
470 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
473 // Now get the PFN entry for this, and make sure it's the correct one
475 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
476 if ((!Pfn1
) || (Pfn1
->u3
.e1
.StartOfAllocation
== 0))
479 // This probably means you did a free on an address that was in between
481 KeBugCheckEx(BAD_POOL_CALLER
,
483 (ULONG_PTR
)BaseAddress
,
489 // Now this PFN isn't the start of any allocation anymore, it's going out
492 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
494 /* Loop the PFNs until we find the one that marks the end of the allocation */
497 /* Make sure these are the pages we setup in the allocation routine */
498 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 1);
499 ASSERT(Pfn1
->u2
.ShareCount
== 1);
500 ASSERT(Pfn1
->PteAddress
== PointerPte
);
501 ASSERT(Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
);
502 ASSERT(Pfn1
->u4
.VerifierAllocation
== 0);
503 ASSERT(Pfn1
->u3
.e1
.PrototypePte
== 0);
505 /* Set the special pending delete marker */
506 MI_SET_PFN_DELETED(Pfn1
);
508 /* Keep going for assertions */
510 } while (Pfn1
++->u3
.e1
.EndOfAllocation
== 0);
513 // Found it, unmark it
516 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
519 // Now compute how many pages this represents
521 PageCount
= (ULONG
)(Pfn1
- StartPfn
+ 1);
524 // So we can know how much to unmap (recall we piggyback on I/O mappings)
526 MmUnmapIoSpace(BaseAddress
, PageCount
<< PAGE_SHIFT
);
529 // Lock the PFN database
531 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
534 // Loop all the pages
536 LastPage
= PageFrameIndex
+ PageCount
;
537 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
540 /* Decrement the share count and move on */
541 MiDecrementShareCount(Pfn1
++, PageFrameIndex
++);
542 } while (PageFrameIndex
< LastPage
);
545 // Release the PFN lock
547 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
/* PUBLIC FUNCTIONS ***********************************************************/
557 MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes
,
558 IN PHYSICAL_ADDRESS LowestAcceptableAddress OPTIONAL
,
559 IN PHYSICAL_ADDRESS HighestAcceptableAddress
,
560 IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL
,
561 IN MEMORY_CACHING_TYPE CacheType OPTIONAL
)
563 PFN_NUMBER LowestPfn
, HighestPfn
, BoundaryPfn
;
566 // Verify count and cache type
568 ASSERT(NumberOfBytes
!= 0);
569 ASSERT(CacheType
<= MmWriteCombined
);
572 // Convert the lowest address into a PFN
574 LowestPfn
= (PFN_NUMBER
)(LowestAcceptableAddress
.QuadPart
>> PAGE_SHIFT
);
575 if (BYTE_OFFSET(LowestAcceptableAddress
.LowPart
)) LowestPfn
++;
578 // Convert and validate the boundary address into a PFN
580 if (BYTE_OFFSET(BoundaryAddressMultiple
.LowPart
)) return NULL
;
581 BoundaryPfn
= (PFN_NUMBER
)(BoundaryAddressMultiple
.QuadPart
>> PAGE_SHIFT
);
584 // Convert the highest address into a PFN
586 HighestPfn
= (PFN_NUMBER
)(HighestAcceptableAddress
.QuadPart
>> PAGE_SHIFT
);
587 if (HighestPfn
> MmHighestPhysicalPage
) HighestPfn
= MmHighestPhysicalPage
;
590 // Validate the PFN bounds
592 if (LowestPfn
> HighestPfn
) return NULL
;
595 // Let the contiguous memory allocator handle it
597 return MiAllocateContiguousMemory(NumberOfBytes
,
609 MmAllocateContiguousMemory(IN SIZE_T NumberOfBytes
,
610 IN PHYSICAL_ADDRESS HighestAcceptableAddress
)
612 PFN_NUMBER HighestPfn
;
617 ASSERT(NumberOfBytes
!= 0);
620 // Convert and normalize the highest address into a PFN
622 HighestPfn
= (PFN_NUMBER
)(HighestAcceptableAddress
.QuadPart
>> PAGE_SHIFT
);
623 if (HighestPfn
> MmHighestPhysicalPage
) HighestPfn
= MmHighestPhysicalPage
;
626 // Let the contiguous memory allocator handle it
628 return MiAllocateContiguousMemory(NumberOfBytes
, 0, HighestPfn
, 0, MmCached
);
636 MmFreeContiguousMemory(IN PVOID BaseAddress
)
639 // Let the contiguous memory allocator handle it
641 MiFreeContiguousMemory(BaseAddress
);
649 MmFreeContiguousMemorySpecifyCache(IN PVOID BaseAddress
,
650 IN SIZE_T NumberOfBytes
,
651 IN MEMORY_CACHING_TYPE CacheType
)
654 // Just call the non-cached version (there's no cache issues for freeing)
656 MiFreeContiguousMemory(BaseAddress
);