/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/contmem.c
 * PURPOSE:         ARM Memory Manager Contiguous Memory Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
/* INCLUDES *******************************************************************/
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "../ARM3/miarm.h"
/* PRIVATE FUNCTIONS **********************************************************/
22 MiFindContiguousPages(IN PFN_NUMBER LowestPfn
,
23 IN PFN_NUMBER HighestPfn
,
24 IN PFN_NUMBER BoundaryPfn
,
25 IN PFN_NUMBER SizeInPages
,
26 IN MEMORY_CACHING_TYPE CacheType
)
28 PFN_NUMBER Page
, PageCount
, LastPage
, Length
, BoundaryMask
;
33 ASSERT(SizeInPages
!= 0);
36 // Convert the boundary PFN into an alignment mask
38 BoundaryMask
= ~(BoundaryPfn
- 1);
41 KeEnterGuardedRegion();
44 // Loop all the physical memory blocks
49 // Capture the base page and length of this memory block
51 Page
= MmPhysicalMemoryBlock
->Run
[i
].BasePage
;
52 PageCount
= MmPhysicalMemoryBlock
->Run
[i
].PageCount
;
55 // Check how far this memory block will go
57 LastPage
= Page
+ PageCount
;
60 // Trim it down to only the PFNs we're actually interested in
62 if ((LastPage
- 1) > HighestPfn
) LastPage
= HighestPfn
+ 1;
63 if (Page
< LowestPfn
) Page
= LowestPfn
;
66 // Skip this run if it's empty or fails to contain all the pages we need
68 if (!(PageCount
) || ((Page
+ SizeInPages
) > LastPage
)) continue;
71 // Now scan all the relevant PFNs in this run
74 for (Pfn1
= MI_PFN_ELEMENT(Page
); Page
< LastPage
; Page
++, Pfn1
++)
77 // If this PFN is in use, ignore it
79 if (MiIsPfnInUse(Pfn1
))
86 // If we haven't chosen a start PFN yet and the caller specified an
87 // alignment, make sure the page matches the alignment restriction
89 if ((!(Length
) && (BoundaryPfn
)) &&
90 (((Page
^ (Page
+ SizeInPages
- 1)) & BoundaryMask
)))
93 // It does not, so bail out
99 // Increase the number of valid pages, and check if we have enough
101 if (++Length
== SizeInPages
)
104 // It appears we've amassed enough legitimate pages, rollback
106 Pfn1
-= (Length
- 1);
107 Page
-= (Length
- 1);
110 // Acquire the PFN lock
112 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
116 // Things might've changed for us. Is the page still free?
118 if (MiIsPfnInUse(Pfn1
)) break;
121 // So far so good. Is this the last confirmed valid page?
126 // Sanity check that we didn't go out of bounds
128 ASSERT(i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
131 // Loop until all PFN entries have been processed
133 EndPfn
= Pfn1
- SizeInPages
+ 1;
137 // This PFN is now a used page, set it up
139 MI_SET_USAGE(MI_USAGE_CONTINOUS_ALLOCATION
);
140 MI_SET_PROCESS2("Kernel Driver");
141 MiUnlinkFreeOrZeroedPage(Pfn1
);
142 Pfn1
->u3
.e2
.ReferenceCount
= 1;
143 Pfn1
->u2
.ShareCount
= 1;
144 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
145 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
146 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
147 Pfn1
->u3
.e1
.PrototypePte
= 0;
148 Pfn1
->u4
.VerifierAllocation
= 0;
149 Pfn1
->PteAddress
= (PVOID
)0xBAADF00D;
152 // Check if this is the last PFN, otherwise go on
154 if (Pfn1
== EndPfn
) break;
159 // Mark the first and last PFN so we can find them later
161 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
162 (Pfn1
+ SizeInPages
- 1)->u3
.e1
.EndOfAllocation
= 1;
165 // Now it's safe to let go of the PFN lock
167 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
170 // Quick sanity check that the last PFN is consistent
172 EndPfn
= Pfn1
+ SizeInPages
;
173 ASSERT(EndPfn
== MI_PFN_ELEMENT(Page
+ 1));
176 // Compute the first page, and make sure it's consistent
178 Page
= Page
- SizeInPages
+ 1;
179 ASSERT(Pfn1
== MI_PFN_ELEMENT(Page
));
182 /* Enable APCs and return the page */
183 KeLeaveGuardedRegion();
188 // Keep going. The purpose of this loop is to reconfirm that
189 // after acquiring the PFN lock these pages are still usable
196 // If we got here, something changed while we hadn't acquired
197 // the PFN lock yet, so we'll have to restart
199 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
203 } while (++i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
206 // And if we get here, it means no suitable physical memory runs were found
213 MiCheckForContiguousMemory(IN PVOID BaseAddress
,
214 IN PFN_NUMBER BaseAddressPages
,
215 IN PFN_NUMBER SizeInPages
,
216 IN PFN_NUMBER LowestPfn
,
217 IN PFN_NUMBER HighestPfn
,
218 IN PFN_NUMBER BoundaryPfn
,
219 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
)
221 PMMPTE StartPte
, EndPte
;
222 PFN_NUMBER PreviousPage
= 0, Page
, HighPage
, BoundaryMask
, Pages
= 0;
225 // Okay, first of all check if the PFNs match our restrictions
227 if (LowestPfn
> HighestPfn
) return NULL
;
228 if (LowestPfn
+ SizeInPages
<= LowestPfn
) return NULL
;
229 if (LowestPfn
+ SizeInPages
- 1 > HighestPfn
) return NULL
;
230 if (BaseAddressPages
< SizeInPages
) return NULL
;
233 // This is the last page we need to get to and the boundary requested
235 HighPage
= HighestPfn
+ 1 - SizeInPages
;
236 BoundaryMask
= ~(BoundaryPfn
- 1);
239 // And here's the PTEs for this allocation. Let's go scan them.
241 StartPte
= MiAddressToPte(BaseAddress
);
242 EndPte
= StartPte
+ BaseAddressPages
;
243 while (StartPte
< EndPte
)
246 // Get this PTE's page number
248 ASSERT (StartPte
->u
.Hard
.Valid
== 1);
249 Page
= PFN_FROM_PTE(StartPte
);
252 // Is this the beginning of our adventure?
257 // Check if this PFN is within our range
259 if ((Page
>= LowestPfn
) && (Page
<= HighPage
))
262 // It is! Do you care about boundary (alignment)?
264 if (!(BoundaryPfn
) ||
265 (!((Page
^ (Page
+ SizeInPages
- 1)) & BoundaryMask
)))
268 // You don't care, or you do care but we deliver
275 // Have we found all the pages we need by now?
276 // Incidently, this means you only wanted one page
278 if (Pages
== SizeInPages
)
283 return MiPteToAddress(StartPte
);
289 // Have we found a page that doesn't seem to be contiguous?
291 if (Page
!= (PreviousPage
+ 1))
294 // Ah crap, we have to start over
301 // Otherwise, we're still in the game. Do we have all our pages?
303 if (++Pages
== SizeInPages
)
306 // We do! This entire range was contiguous, so we'll return it!
308 return MiPteToAddress(StartPte
- Pages
+ 1);
313 // Try with the next PTE, remember this PFN
321 // All good returns are within the loop...
328 MiFindContiguousMemory(IN PFN_NUMBER LowestPfn
,
329 IN PFN_NUMBER HighestPfn
,
330 IN PFN_NUMBER BoundaryPfn
,
331 IN PFN_NUMBER SizeInPages
,
332 IN MEMORY_CACHING_TYPE CacheType
)
335 PHYSICAL_ADDRESS PhysicalAddress
;
340 ASSERT(SizeInPages
!= 0);
343 // Our last hope is to scan the free page list for contiguous pages
345 Page
= MiFindContiguousPages(LowestPfn
,
350 if (!Page
) return NULL
;
353 // We'll just piggyback on the I/O memory mapper
355 PhysicalAddress
.QuadPart
= Page
<< PAGE_SHIFT
;
356 BaseAddress
= MmMapIoSpace(PhysicalAddress
, SizeInPages
<< PAGE_SHIFT
, CacheType
);
359 /* Loop the PFN entries */
360 Pfn1
= MiGetPfnEntry(Page
);
361 EndPfn
= Pfn1
+ SizeInPages
;
362 PointerPte
= MiAddressToPte(BaseAddress
);
365 /* Write the PTE address */
366 Pfn1
->PteAddress
= PointerPte
;
367 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(MiAddressToPte(PointerPte
++));
368 } while (++Pfn1
< EndPfn
);
370 /* Return the address */
376 MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes
,
377 IN PFN_NUMBER LowestAcceptablePfn
,
378 IN PFN_NUMBER HighestAcceptablePfn
,
379 IN PFN_NUMBER BoundaryPfn
,
380 IN MEMORY_CACHING_TYPE CacheType
)
383 PFN_NUMBER SizeInPages
;
384 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
387 // Verify count and cache type
389 ASSERT(NumberOfBytes
!= 0);
390 ASSERT(CacheType
<= MmWriteCombined
);
393 // Compute size requested
395 SizeInPages
= BYTES_TO_PAGES(NumberOfBytes
);
398 // Convert the cache attribute and check for cached requests
400 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
401 if (CacheAttribute
== MiCached
)
404 // Because initial nonpaged pool is supposed to be contiguous, go ahead
405 // and try making a nonpaged pool allocation first.
407 BaseAddress
= ExAllocatePoolWithTag(NonPagedPoolCacheAligned
,
413 // Now make sure it's actually contiguous (if it came from expansion
416 if (MiCheckForContiguousMemory(BaseAddress
,
420 HighestAcceptablePfn
,
425 // Sweet, we're in business!
433 ExFreePoolWithTag(BaseAddress
, 'mCmM');
438 // According to MSDN, the system won't try anything else if you're higher
441 if (KeGetCurrentIrql() > APC_LEVEL
) return NULL
;
444 // Otherwise, we'll go try to find some
446 return MiFindContiguousMemory(LowestAcceptablePfn
,
447 HighestAcceptablePfn
,
455 MiFreeContiguousMemory(IN PVOID BaseAddress
)
458 PFN_NUMBER PageFrameIndex
, LastPage
, PageCount
;
459 PMMPFN Pfn1
, StartPfn
;
464 // First, check if the memory came from initial nonpaged pool, or expansion
466 if (((BaseAddress
>= MmNonPagedPoolStart
) &&
467 (BaseAddress
< (PVOID
)((ULONG_PTR
)MmNonPagedPoolStart
+
468 MmSizeOfNonPagedPoolInBytes
))) ||
469 ((BaseAddress
>= MmNonPagedPoolExpansionStart
) &&
470 (BaseAddress
< MmNonPagedPoolEnd
)))
473 // It did, so just use the pool to free this
475 ExFreePoolWithTag(BaseAddress
, 'mCmM');
479 /* Get the PTE and frame number for the allocation*/
480 PointerPte
= MiAddressToPte(BaseAddress
);
481 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
484 // Now get the PFN entry for this, and make sure it's the correct one
486 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
487 if ((!Pfn1
) || (Pfn1
->u3
.e1
.StartOfAllocation
== 0))
490 // This probably means you did a free on an address that was in between
492 KeBugCheckEx(BAD_POOL_CALLER
,
494 (ULONG_PTR
)BaseAddress
,
500 // Now this PFN isn't the start of any allocation anymore, it's going out
503 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
505 /* Loop the PFNs until we find the one that marks the end of the allocation */
508 /* Make sure these are the pages we setup in the allocation routine */
509 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 1);
510 ASSERT(Pfn1
->u2
.ShareCount
== 1);
511 ASSERT(Pfn1
->PteAddress
== PointerPte
);
512 ASSERT(Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
);
513 ASSERT(Pfn1
->u4
.VerifierAllocation
== 0);
514 ASSERT(Pfn1
->u3
.e1
.PrototypePte
== 0);
516 /* Set the special pending delete marker */
517 MI_SET_PFN_DELETED(Pfn1
);
519 /* Keep going for assertions */
521 } while (Pfn1
++->u3
.e1
.EndOfAllocation
== 0);
524 // Found it, unmark it
527 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
530 // Now compute how many pages this represents
532 PageCount
= (ULONG
)(Pfn1
- StartPfn
+ 1);
535 // So we can know how much to unmap (recall we piggyback on I/O mappings)
537 MmUnmapIoSpace(BaseAddress
, PageCount
<< PAGE_SHIFT
);
540 // Lock the PFN database
542 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
545 // Loop all the pages
547 LastPage
= PageFrameIndex
+ PageCount
;
548 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
551 /* Decrement the share count and move on */
552 MiDecrementShareCount(Pfn1
++, PageFrameIndex
++);
553 } while (PageFrameIndex
< LastPage
);
556 // Release the PFN lock
558 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
/* PUBLIC FUNCTIONS ***********************************************************/
568 MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes
,
569 IN PHYSICAL_ADDRESS LowestAcceptableAddress OPTIONAL
,
570 IN PHYSICAL_ADDRESS HighestAcceptableAddress
,
571 IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL
,
572 IN MEMORY_CACHING_TYPE CacheType OPTIONAL
)
574 PFN_NUMBER LowestPfn
, HighestPfn
, BoundaryPfn
;
577 // Verify count and cache type
579 ASSERT(NumberOfBytes
!= 0);
580 ASSERT(CacheType
<= MmWriteCombined
);
583 // Convert the lowest address into a PFN
585 LowestPfn
= (PFN_NUMBER
)(LowestAcceptableAddress
.QuadPart
>> PAGE_SHIFT
);
586 if (BYTE_OFFSET(LowestAcceptableAddress
.LowPart
)) LowestPfn
++;
589 // Convert and validate the boundary address into a PFN
591 if (BYTE_OFFSET(BoundaryAddressMultiple
.LowPart
)) return NULL
;
592 BoundaryPfn
= (PFN_NUMBER
)(BoundaryAddressMultiple
.QuadPart
>> PAGE_SHIFT
);
595 // Convert the highest address into a PFN
597 HighestPfn
= (PFN_NUMBER
)(HighestAcceptableAddress
.QuadPart
>> PAGE_SHIFT
);
598 if (HighestPfn
> MmHighestPhysicalPage
) HighestPfn
= MmHighestPhysicalPage
;
601 // Validate the PFN bounds
603 if (LowestPfn
> HighestPfn
) return NULL
;
606 // Let the contiguous memory allocator handle it
608 return MiAllocateContiguousMemory(NumberOfBytes
,
620 MmAllocateContiguousMemory(IN SIZE_T NumberOfBytes
,
621 IN PHYSICAL_ADDRESS HighestAcceptableAddress
)
623 PFN_NUMBER HighestPfn
;
628 ASSERT(NumberOfBytes
!= 0);
631 // Convert and normalize the highest address into a PFN
633 HighestPfn
= (PFN_NUMBER
)(HighestAcceptableAddress
.QuadPart
>> PAGE_SHIFT
);
634 if (HighestPfn
> MmHighestPhysicalPage
) HighestPfn
= MmHighestPhysicalPage
;
637 // Let the contiguous memory allocator handle it
639 return MiAllocateContiguousMemory(NumberOfBytes
, 0, HighestPfn
, 0, MmCached
);
647 MmFreeContiguousMemory(IN PVOID BaseAddress
)
650 // Let the contiguous memory allocator handle it
652 MiFreeContiguousMemory(BaseAddress
);
660 MmFreeContiguousMemorySpecifyCache(IN PVOID BaseAddress
,
661 IN SIZE_T NumberOfBytes
,
662 IN MEMORY_CACHING_TYPE CacheType
)
665 // Just call the non-cached version (there's no cache issues for freeing)
667 MiFreeContiguousMemory(BaseAddress
);