/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/contmem.c
 * PURPOSE:         ARM Memory Manager Contiguous Memory Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>
18 /* PRIVATE FUNCTIONS **********************************************************/
22 MiFindContiguousPages(IN PFN_NUMBER LowestPfn
,
23 IN PFN_NUMBER HighestPfn
,
24 IN PFN_NUMBER BoundaryPfn
,
25 IN PFN_NUMBER SizeInPages
,
26 IN MEMORY_CACHING_TYPE CacheType
)
28 PFN_NUMBER Page
, PageCount
, LastPage
, Length
, BoundaryMask
;
33 ASSERT(SizeInPages
!= 0);
36 // Convert the boundary PFN into an alignment mask
38 BoundaryMask
= ~(BoundaryPfn
- 1);
41 KeEnterGuardedRegion();
44 // Loop all the physical memory blocks
49 // Capture the base page and length of this memory block
51 Page
= MmPhysicalMemoryBlock
->Run
[i
].BasePage
;
52 PageCount
= MmPhysicalMemoryBlock
->Run
[i
].PageCount
;
55 // Check how far this memory block will go
57 LastPage
= Page
+ PageCount
;
60 // Trim it down to only the PFNs we're actually interested in
62 if ((LastPage
- 1) > HighestPfn
) LastPage
= HighestPfn
+ 1;
63 if (Page
< LowestPfn
) Page
= LowestPfn
;
66 // Skip this run if it's empty or fails to contain all the pages we need
68 if (!(PageCount
) || ((Page
+ SizeInPages
) > LastPage
)) continue;
71 // Now scan all the relevant PFNs in this run
74 for (Pfn1
= MI_PFN_ELEMENT(Page
); Page
< LastPage
; Page
++, Pfn1
++)
77 // If this PFN is in use, ignore it
79 if (MiIsPfnInUse(Pfn1
))
86 // If we haven't chosen a start PFN yet and the caller specified an
87 // alignment, make sure the page matches the alignment restriction
89 if ((!(Length
) && (BoundaryPfn
)) &&
90 (((Page
^ (Page
+ SizeInPages
- 1)) & BoundaryMask
)))
93 // It does not, so bail out
99 // Increase the number of valid pages, and check if we have enough
101 if (++Length
== SizeInPages
)
104 // It appears we've amassed enough legitimate pages, rollback
106 Pfn1
-= (Length
- 1);
107 Page
-= (Length
- 1);
110 // Acquire the PFN lock
112 OldIrql
= MiAcquirePfnLock();
116 // Things might've changed for us. Is the page still free?
118 if (MiIsPfnInUse(Pfn1
)) break;
121 // So far so good. Is this the last confirmed valid page?
126 // Sanity check that we didn't go out of bounds
128 ASSERT(i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
131 // Loop until all PFN entries have been processed
133 EndPfn
= Pfn1
- SizeInPages
+ 1;
137 // This PFN is now a used page, set it up
139 MI_SET_USAGE(MI_USAGE_CONTINOUS_ALLOCATION
);
140 MI_SET_PROCESS2("Kernel Driver");
141 MiUnlinkFreeOrZeroedPage(Pfn1
);
142 Pfn1
->u3
.e2
.ReferenceCount
= 1;
143 Pfn1
->u2
.ShareCount
= 1;
144 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
145 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
146 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
147 Pfn1
->u3
.e1
.PrototypePte
= 0;
148 Pfn1
->u4
.VerifierAllocation
= 0;
149 Pfn1
->PteAddress
= (PVOID
)(ULONG_PTR
)0xBAADF00DBAADF00DULL
;
152 // Check if this is the last PFN, otherwise go on
154 if (Pfn1
== EndPfn
) break;
159 // Mark the first and last PFN so we can find them later
161 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
162 (Pfn1
+ SizeInPages
- 1)->u3
.e1
.EndOfAllocation
= 1;
165 // Now it's safe to let go of the PFN lock
167 MiReleasePfnLock(OldIrql
);
170 // Quick sanity check that the last PFN is consistent
172 EndPfn
= Pfn1
+ SizeInPages
;
173 ASSERT(EndPfn
== MI_PFN_ELEMENT(Page
+ 1));
176 // Compute the first page, and make sure it's consistent
178 Page
= Page
- SizeInPages
+ 1;
179 ASSERT(Pfn1
== MI_PFN_ELEMENT(Page
));
182 /* Enable APCs and return the page */
183 KeLeaveGuardedRegion();
188 // Keep going. The purpose of this loop is to reconfirm that
189 // after acquiring the PFN lock these pages are still usable
196 // If we got here, something changed while we hadn't acquired
197 // the PFN lock yet, so we'll have to restart
199 MiReleasePfnLock(OldIrql
);
203 } while (++i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
206 // And if we get here, it means no suitable physical memory runs were found
208 KeLeaveGuardedRegion();
214 MiCheckForContiguousMemory(IN PVOID BaseAddress
,
215 IN PFN_NUMBER BaseAddressPages
,
216 IN PFN_NUMBER SizeInPages
,
217 IN PFN_NUMBER LowestPfn
,
218 IN PFN_NUMBER HighestPfn
,
219 IN PFN_NUMBER BoundaryPfn
,
220 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
)
222 PMMPTE StartPte
, EndPte
;
223 PFN_NUMBER PreviousPage
= 0, Page
, HighPage
, BoundaryMask
, Pages
= 0;
226 // Okay, first of all check if the PFNs match our restrictions
228 if (LowestPfn
> HighestPfn
) return NULL
;
229 if (LowestPfn
+ SizeInPages
<= LowestPfn
) return NULL
;
230 if (LowestPfn
+ SizeInPages
- 1 > HighestPfn
) return NULL
;
231 if (BaseAddressPages
< SizeInPages
) return NULL
;
234 // This is the last page we need to get to and the boundary requested
236 HighPage
= HighestPfn
+ 1 - SizeInPages
;
237 BoundaryMask
= ~(BoundaryPfn
- 1);
240 // And here's the PTEs for this allocation. Let's go scan them.
242 StartPte
= MiAddressToPte(BaseAddress
);
243 EndPte
= StartPte
+ BaseAddressPages
;
244 while (StartPte
< EndPte
)
247 // Get this PTE's page number
249 ASSERT (StartPte
->u
.Hard
.Valid
== 1);
250 Page
= PFN_FROM_PTE(StartPte
);
253 // Is this the beginning of our adventure?
258 // Check if this PFN is within our range
260 if ((Page
>= LowestPfn
) && (Page
<= HighPage
))
263 // It is! Do you care about boundary (alignment)?
265 if (!(BoundaryPfn
) ||
266 (!((Page
^ (Page
+ SizeInPages
- 1)) & BoundaryMask
)))
269 // You don't care, or you do care but we deliver
276 // Have we found all the pages we need by now?
277 // Incidently, this means you only wanted one page
279 if (Pages
== SizeInPages
)
284 return MiPteToAddress(StartPte
);
290 // Have we found a page that doesn't seem to be contiguous?
292 if (Page
!= (PreviousPage
+ 1))
295 // Ah crap, we have to start over
302 // Otherwise, we're still in the game. Do we have all our pages?
304 if (++Pages
== SizeInPages
)
307 // We do! This entire range was contiguous, so we'll return it!
309 return MiPteToAddress(StartPte
- Pages
+ 1);
314 // Try with the next PTE, remember this PFN
322 // All good returns are within the loop...
329 MiFindContiguousMemory(IN PFN_NUMBER LowestPfn
,
330 IN PFN_NUMBER HighestPfn
,
331 IN PFN_NUMBER BoundaryPfn
,
332 IN PFN_NUMBER SizeInPages
,
333 IN MEMORY_CACHING_TYPE CacheType
)
336 PHYSICAL_ADDRESS PhysicalAddress
;
341 ASSERT(SizeInPages
!= 0);
344 // Our last hope is to scan the free page list for contiguous pages
346 Page
= MiFindContiguousPages(LowestPfn
,
351 if (!Page
) return NULL
;
354 // We'll just piggyback on the I/O memory mapper
356 PhysicalAddress
.QuadPart
= Page
<< PAGE_SHIFT
;
357 BaseAddress
= MmMapIoSpace(PhysicalAddress
, SizeInPages
<< PAGE_SHIFT
, CacheType
);
360 /* Loop the PFN entries */
361 Pfn1
= MiGetPfnEntry(Page
);
362 EndPfn
= Pfn1
+ SizeInPages
;
363 PointerPte
= MiAddressToPte(BaseAddress
);
366 /* Write the PTE address */
367 Pfn1
->PteAddress
= PointerPte
;
368 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(MiAddressToPte(PointerPte
++));
369 } while (++Pfn1
< EndPfn
);
371 /* Return the address */
377 MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes
,
378 IN PFN_NUMBER LowestAcceptablePfn
,
379 IN PFN_NUMBER HighestAcceptablePfn
,
380 IN PFN_NUMBER BoundaryPfn
,
381 IN MEMORY_CACHING_TYPE CacheType
)
384 PFN_NUMBER SizeInPages
;
385 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
388 // Verify count and cache type
390 ASSERT(NumberOfBytes
!= 0);
391 ASSERT(CacheType
<= MmWriteCombined
);
394 // Compute size requested
396 SizeInPages
= BYTES_TO_PAGES(NumberOfBytes
);
399 // Convert the cache attribute and check for cached requests
401 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
402 if (CacheAttribute
== MiCached
)
405 // Because initial nonpaged pool is supposed to be contiguous, go ahead
406 // and try making a nonpaged pool allocation first.
408 BaseAddress
= ExAllocatePoolWithTag(NonPagedPoolCacheAligned
,
414 // Now make sure it's actually contiguous (if it came from expansion
417 if (MiCheckForContiguousMemory(BaseAddress
,
421 HighestAcceptablePfn
,
426 // Sweet, we're in business!
434 ExFreePoolWithTag(BaseAddress
, 'mCmM');
439 // According to MSDN, the system won't try anything else if you're higher
442 if (KeGetCurrentIrql() > APC_LEVEL
) return NULL
;
445 // Otherwise, we'll go try to find some
447 BaseAddress
= MiFindContiguousMemory(LowestAcceptablePfn
,
448 HighestAcceptablePfn
,
454 DPRINT1("Unable to allocate contiguous memory for %d bytes (%d pages), out of memory!\n", NumberOfBytes
, SizeInPages
);
461 MiFreeContiguousMemory(IN PVOID BaseAddress
)
464 PFN_NUMBER PageFrameIndex
, LastPage
, PageCount
;
465 PMMPFN Pfn1
, StartPfn
;
470 // First, check if the memory came from initial nonpaged pool, or expansion
472 if (((BaseAddress
>= MmNonPagedPoolStart
) &&
473 (BaseAddress
< (PVOID
)((ULONG_PTR
)MmNonPagedPoolStart
+
474 MmSizeOfNonPagedPoolInBytes
))) ||
475 ((BaseAddress
>= MmNonPagedPoolExpansionStart
) &&
476 (BaseAddress
< MmNonPagedPoolEnd
)))
479 // It did, so just use the pool to free this
481 ExFreePoolWithTag(BaseAddress
, 'mCmM');
485 /* Get the PTE and frame number for the allocation*/
486 PointerPte
= MiAddressToPte(BaseAddress
);
487 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
490 // Now get the PFN entry for this, and make sure it's the correct one
492 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
493 if ((!Pfn1
) || (Pfn1
->u3
.e1
.StartOfAllocation
== 0))
496 // This probably means you did a free on an address that was in between
498 KeBugCheckEx(BAD_POOL_CALLER
,
500 (ULONG_PTR
)BaseAddress
,
506 // Now this PFN isn't the start of any allocation anymore, it's going out
509 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
511 /* Loop the PFNs until we find the one that marks the end of the allocation */
514 /* Make sure these are the pages we setup in the allocation routine */
515 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 1);
516 ASSERT(Pfn1
->u2
.ShareCount
== 1);
517 ASSERT(Pfn1
->PteAddress
== PointerPte
);
518 ASSERT(Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
);
519 ASSERT(Pfn1
->u4
.VerifierAllocation
== 0);
520 ASSERT(Pfn1
->u3
.e1
.PrototypePte
== 0);
522 /* Set the special pending delete marker */
523 MI_SET_PFN_DELETED(Pfn1
);
525 /* Keep going for assertions */
527 } while (Pfn1
++->u3
.e1
.EndOfAllocation
== 0);
530 // Found it, unmark it
533 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
536 // Now compute how many pages this represents
538 PageCount
= (ULONG
)(Pfn1
- StartPfn
+ 1);
541 // So we can know how much to unmap (recall we piggyback on I/O mappings)
543 MmUnmapIoSpace(BaseAddress
, PageCount
<< PAGE_SHIFT
);
546 // Lock the PFN database
548 OldIrql
= MiAcquirePfnLock();
551 // Loop all the pages
553 LastPage
= PageFrameIndex
+ PageCount
;
554 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
557 /* Decrement the share count and move on */
558 MiDecrementShareCount(Pfn1
++, PageFrameIndex
++);
559 } while (PageFrameIndex
< LastPage
);
562 // Release the PFN lock
564 MiReleasePfnLock(OldIrql
);
567 /* PUBLIC FUNCTIONS ***********************************************************/
574 MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes
,
575 IN PHYSICAL_ADDRESS LowestAcceptableAddress OPTIONAL
,
576 IN PHYSICAL_ADDRESS HighestAcceptableAddress
,
577 IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL
,
578 IN MEMORY_CACHING_TYPE CacheType OPTIONAL
)
580 PFN_NUMBER LowestPfn
, HighestPfn
, BoundaryPfn
;
583 // Verify count and cache type
585 ASSERT(NumberOfBytes
!= 0);
586 ASSERT(CacheType
<= MmWriteCombined
);
589 // Convert the lowest address into a PFN
591 LowestPfn
= (PFN_NUMBER
)(LowestAcceptableAddress
.QuadPart
>> PAGE_SHIFT
);
592 if (BYTE_OFFSET(LowestAcceptableAddress
.LowPart
)) LowestPfn
++;
595 // Convert and validate the boundary address into a PFN
597 if (BYTE_OFFSET(BoundaryAddressMultiple
.LowPart
)) return NULL
;
598 BoundaryPfn
= (PFN_NUMBER
)(BoundaryAddressMultiple
.QuadPart
>> PAGE_SHIFT
);
601 // Convert the highest address into a PFN
603 HighestPfn
= (PFN_NUMBER
)(HighestAcceptableAddress
.QuadPart
>> PAGE_SHIFT
);
604 if (HighestPfn
> MmHighestPhysicalPage
) HighestPfn
= MmHighestPhysicalPage
;
607 // Validate the PFN bounds
609 if (LowestPfn
> HighestPfn
) return NULL
;
612 // Let the contiguous memory allocator handle it
614 return MiAllocateContiguousMemory(NumberOfBytes
,
626 MmAllocateContiguousMemory(IN SIZE_T NumberOfBytes
,
627 IN PHYSICAL_ADDRESS HighestAcceptableAddress
)
629 PFN_NUMBER HighestPfn
;
634 ASSERT(NumberOfBytes
!= 0);
637 // Convert and normalize the highest address into a PFN
639 HighestPfn
= (PFN_NUMBER
)(HighestAcceptableAddress
.QuadPart
>> PAGE_SHIFT
);
640 if (HighestPfn
> MmHighestPhysicalPage
) HighestPfn
= MmHighestPhysicalPage
;
643 // Let the contiguous memory allocator handle it
645 return MiAllocateContiguousMemory(NumberOfBytes
, 0, HighestPfn
, 0, MmCached
);
653 MmFreeContiguousMemory(IN PVOID BaseAddress
)
656 // Let the contiguous memory allocator handle it
658 MiFreeContiguousMemory(BaseAddress
);
666 MmFreeContiguousMemorySpecifyCache(IN PVOID BaseAddress
,
667 IN SIZE_T NumberOfBytes
,
668 IN MEMORY_CACHING_TYPE CacheType
)
671 // Just call the non-cached version (there's no cache issues for freeing)
673 MiFreeContiguousMemory(BaseAddress
);