/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/contmem.c
 * PURPOSE:         ARM Memory Manager Contiguous Memory Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::CONTMEM"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* PRIVATE FUNCTIONS **********************************************************/

PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    //
    BoundaryMask = ~(BoundaryPfn - 1);
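
    //
    // For example, with 4KB pages a BoundaryPfn of 0x10 (a 64KB boundary)
    // yields a mask of ~0xF: two PFNs fall in the same 64KB-aligned window
    // exactly when their masked values are equal, which is what the XOR test
    // on the first and last page of a candidate run checks below. When no
    // boundary is requested (BoundaryPfn == 0), the mask is never consulted.
    //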

    /* Disable APCs */
    KeEnterGuardedRegion();

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run
        //
        Length = 0;
        for (Pfn1 = MI_PFN_ELEMENT(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            //
            if (MiIsPfnInUse(Pfn1))
            {
                Length = 0;
                continue;
            }

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, so roll
                // back to the start of the run
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (MiIsPfnInUse(Pfn1)) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // This PFN is now a used page, set it up
                            //
                            MI_SET_USAGE(MI_USAGE_CONTINOUS_ALLOCATION);
                            MI_SET_PROCESS2("Kernel Driver");
                            MiUnlinkFreeOrZeroedPage(Pfn1);
                            Pfn1->u3.e2.ReferenceCount = 1;
                            Pfn1->u2.ShareCount = 1;
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;
                            Pfn1->u3.e1.StartOfAllocation = 0;
                            Pfn1->u3.e1.EndOfAllocation = 0;
                            Pfn1->u3.e1.PrototypePte = 0;
                            Pfn1->u4.VerifierAllocation = 0;
                            Pfn1->PteAddress = (PVOID)0xBAADF00D;

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->u3.e1.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MI_PFN_ELEMENT(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page = Page - SizeInPages + 1;
                        ASSERT(Pfn1 == MI_PFN_ELEMENT(Page));
                        ASSERT(Page != 0);

                        /* Enable APCs and return the page */
                        KeLeaveGuardedRegion();
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed after our initial scan but
                // before we acquired the PFN lock, so we'll have to restart
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    return 0;
}

PVOID
NTAPI
MiCheckForContiguousMemory(IN PVOID BaseAddress,
                           IN PFN_NUMBER BaseAddressPages,
                           IN PFN_NUMBER SizeInPages,
                           IN PFN_NUMBER LowestPfn,
                           IN PFN_NUMBER HighestPfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute)
{
    PMMPTE StartPte, EndPte;
    PFN_NUMBER PreviousPage = 0, Page, HighPage, BoundaryMask, Pages = 0;

    //
    // Okay, first of all check if the PFNs match our restrictions
    //
    if (LowestPfn > HighestPfn) return NULL;
    if (LowestPfn + SizeInPages <= LowestPfn) return NULL;
    if (LowestPfn + SizeInPages - 1 > HighestPfn) return NULL;
    if (BaseAddressPages < SizeInPages) return NULL;
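
    //
    // Note: the second check above rejects arithmetic wrap-around: it can
    // only hold if adding SizeInPages overflowed PFN_NUMBER (or SizeInPages
    // was zero), so impossible requests are turned away up front.
    //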

    //
    // This is the last page we need to get to and the boundary requested
    //
    HighPage = HighestPfn + 1 - SizeInPages;
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // And here are the PTEs for this allocation. Let's go scan them.
    //
    StartPte = MiAddressToPte(BaseAddress);
    EndPte = StartPte + BaseAddressPages;
    while (StartPte < EndPte)
    {
        //
        // Get this PTE's page number
        //
        ASSERT(StartPte->u.Hard.Valid == 1);
        Page = PFN_FROM_PTE(StartPte);

        //
        // Is this the beginning of our adventure?
        //
        if (!Pages)
        {
            //
            // Check if this PFN is within our range
            //
            if ((Page >= LowestPfn) && (Page <= HighPage))
            {
                //
                // It is! Do you care about boundary (alignment)?
                //
                if (!(BoundaryPfn) ||
                    (!((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
                {
                    //
                    // You don't care, or you do care but we deliver
                    //
                    Pages++;
                }
            }

            //
            // Have we found all the pages we need by now?
            // Incidentally, this means you only wanted one page
            //
            if (Pages == SizeInPages)
            {
                //
                // Mission complete
                //
                return MiPteToAddress(StartPte);
            }
        }
        else
        {
            //
            // Have we found a page that doesn't seem to be contiguous?
            //
            if (Page != (PreviousPage + 1))
            {
                //
                // Ah crap, we have to start over
                //
                Pages = 0;
                continue;
            }

            //
            // Otherwise, we're still in the game. Do we have all our pages?
            //
            if (++Pages == SizeInPages)
            {
                //
                // We do! This entire range was contiguous, so we'll return it!
                //
                return MiPteToAddress(StartPte - Pages + 1);
            }
        }

        //
        // Try with the next PTE, remember this PFN
        //
        PreviousPage = Page;
        StartPte++;
        continue;
    }

    //
    // All good returns are within the loop...
    //
    return NULL;
}

PVOID
NTAPI
MiFindContiguousMemory(IN PFN_NUMBER LowestPfn,
                       IN PFN_NUMBER HighestPfn,
                       IN PFN_NUMBER BoundaryPfn,
                       IN PFN_NUMBER SizeInPages,
                       IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page;
    PHYSICAL_ADDRESS PhysicalAddress;
    PMMPFN Pfn1, EndPfn;
    PMMPTE PointerPte;
    PVOID BaseAddress;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Our last hope is to scan the free page list for contiguous pages
    //
    Page = MiFindContiguousPages(LowestPfn,
                                 HighestPfn,
                                 BoundaryPfn,
                                 SizeInPages,
                                 CacheType);
    if (!Page) return NULL;

    //
    // We'll just piggyback on the I/O memory mapper. Widen the PFN before
    // shifting so the physical address isn't truncated on 32-bit builds.
    //
    PhysicalAddress.QuadPart = (ULONGLONG)Page << PAGE_SHIFT;
    BaseAddress = MmMapIoSpace(PhysicalAddress, SizeInPages << PAGE_SHIFT, CacheType);
    ASSERT(BaseAddress);

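    //
    // Record mapping bookkeeping in each PFN entry: PteAddress is the PTE
    // that maps the page, and PteFrame is the PFN of the page table page
    // containing that PTE. MiFreeContiguousMemory asserts on PteAddress
    // later when it validates and tears down the allocation.
    //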
    /* Loop the PFN entries */
    Pfn1 = MiGetPfnEntry(Page);
    EndPfn = Pfn1 + SizeInPages;
    PointerPte = MiAddressToPte(BaseAddress);
    do
    {
        /* Write the PTE address */
        Pfn1->PteAddress = PointerPte;
        Pfn1->u4.PteFrame = PFN_FROM_PTE(MiAddressToPte(PointerPte++));
    } while (++Pfn1 < EndPfn);

    /* Return the address */
    return BaseAddress;
}

PVOID
NTAPI
MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
                           IN PFN_NUMBER LowestAcceptablePfn,
                           IN PFN_NUMBER HighestAcceptablePfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MEMORY_CACHING_TYPE CacheType)
{
    PVOID BaseAddress;
    PFN_NUMBER SizeInPages;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Verify count and cache type
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(CacheType <= MmWriteCombined);

    //
    // Compute size requested
    //
    SizeInPages = BYTES_TO_PAGES(NumberOfBytes);

    //
    // Convert the cache attribute and check for cached requests
    //
    CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    if (CacheAttribute == MiCached)
    {
        //
        // Because initial nonpaged pool is supposed to be contiguous, go ahead
        // and try making a nonpaged pool allocation first.
        //
        BaseAddress = ExAllocatePoolWithTag(NonPagedPoolCacheAligned,
                                            NumberOfBytes,
                                            'mCmM');
        if (BaseAddress)
        {
            //
            // Now make sure it's actually contiguous (if it came from expansion
            // it might not be).
            //
            if (MiCheckForContiguousMemory(BaseAddress,
                                           SizeInPages,
                                           SizeInPages,
                                           LowestAcceptablePfn,
                                           HighestAcceptablePfn,
                                           BoundaryPfn,
                                           CacheAttribute))
            {
                //
                // Sweet, we're in business!
                //
                return BaseAddress;
            }

            //
            // No such luck
            //
            ExFreePool(BaseAddress);
        }
    }

    //
    // According to MSDN, the system won't try anything else if you're higher
    // than APC level.
    //
    if (KeGetCurrentIrql() > APC_LEVEL) return NULL;
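
    //
    // This restriction also matches the implementation: the fallback scanner
    // below is PAGED_CODE(), which requires IRQL <= APC_LEVEL to begin with.
    //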

    //
    // Otherwise, we'll go try to find some
    //
    return MiFindContiguousMemory(LowestAcceptablePfn,
                                  HighestAcceptablePfn,
                                  BoundaryPfn,
                                  SizeInPages,
                                  CacheType);
}

VOID
NTAPI
MiFreeContiguousMemory(IN PVOID BaseAddress)
{
    KIRQL OldIrql;
    PFN_NUMBER PageFrameIndex, LastPage, PageCount;
    PMMPFN Pfn1, StartPfn;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // First, check if the memory came from initial nonpaged pool, or expansion
    //
    if (((BaseAddress >= MmNonPagedPoolStart) &&
         (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                MmSizeOfNonPagedPoolInBytes))) ||
        ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
         (BaseAddress < MmNonPagedPoolEnd)))
    {
        //
        // It did, so just use the pool to free this
        //
        ExFreePool(BaseAddress);
        return;
    }

    /* Get the PTE and frame number for the allocation */
    PointerPte = MiAddressToPte(BaseAddress);
    PageFrameIndex = PFN_FROM_PTE(PointerPte);

    //
    // Now get the PFN entry for this, and make sure it's the correct one
    //
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    if ((!Pfn1) || (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // This probably means you did a free on an address that was in the
        // middle of an allocation, rather than at its start
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     0x60,
                     (ULONG_PTR)BaseAddress,
                     0,
                     0);
    }

    //
    // Now this PFN isn't the start of any allocation anymore, it's going out
    //
    StartPfn = Pfn1;
    Pfn1->u3.e1.StartOfAllocation = 0;

    /* Loop the PFNs until we find the one that marks the end of the allocation */
    do
    {
        /* Make sure these are the pages we set up in the allocation routine */
        ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(Pfn1->PteAddress == PointerPte);
        ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
        ASSERT(Pfn1->u4.VerifierAllocation == 0);
        ASSERT(Pfn1->u3.e1.PrototypePte == 0);

        /* Set the special pending delete marker */
        MI_SET_PFN_DELETED(Pfn1);

        /* Keep going for assertions */
        PointerPte++;
    } while (Pfn1++->u3.e1.EndOfAllocation == 0);
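
    //
    // Note: the post-increment in the loop condition advances Pfn1 even on
    // the iteration that tests the EndOfAllocation PFN, so Pfn1 now points
    // one entry past the end of the allocation; step back to it.
    //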
    //
    // Found it, unmark it
    //
    Pfn1--;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Now compute how many pages this represents
    //
    PageCount = (ULONG)(Pfn1 - StartPfn + 1);

    //
    // So we know how much to unmap (recall we piggyback on I/O mappings)
    //
    MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the pages
    //
    LastPage = PageFrameIndex + PageCount;
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    do
    {
        /* Decrement the share count and move on */
        MiDecrementShareCount(Pfn1++, PageFrameIndex++);
    } while (PageFrameIndex < LastPage);

    //
    // Release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes,
                                       IN PHYSICAL_ADDRESS LowestAcceptableAddress OPTIONAL,
                                       IN PHYSICAL_ADDRESS HighestAcceptableAddress,
                                       IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL,
                                       IN MEMORY_CACHING_TYPE CacheType OPTIONAL)
{
    PFN_NUMBER LowestPfn, HighestPfn, BoundaryPfn;

    //
    // Verify count and cache type
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(CacheType <= MmWriteCombined);

    //
    // Convert the lowest address into a PFN, rounding up to a page boundary
    //
    LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) LowestPfn++;

    //
    // Convert and validate the boundary address into a PFN
    //
    if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) return NULL;
    BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);
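
    //
    // For example, a 64KB BoundaryAddressMultiple yields BoundaryPfn = 0x10
    // with 4KB pages: the returned buffer will then never cross a
    // 64KB-aligned physical address boundary.
    //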

    //
    // Convert the highest address into a PFN
    //
    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;

    //
    // Validate the PFN bounds
    //
    if (LowestPfn > HighestPfn) return NULL;

    //
    // Let the contiguous memory allocator handle it
    //
    return MiAllocateContiguousMemory(NumberOfBytes,
                                      LowestPfn,
                                      HighestPfn,
                                      BoundaryPfn,
                                      CacheType);
}
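
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * allocate a 16KB cached buffer below 16MB that never crosses a 64KB
 * physical boundary, then free it again.
 *
 *     PHYSICAL_ADDRESS Low, High, Boundary;
 *     PVOID Buffer;
 *
 *     Low.QuadPart = 0;
 *     High.QuadPart = 0x00FFFFFF;  // highest acceptable byte: below 16MB
 *     Boundary.QuadPart = 0x10000; // 64KB boundary multiple
 *     Buffer = MmAllocateContiguousMemorySpecifyCache(0x4000,
 *                                                     Low,
 *                                                     High,
 *                                                     Boundary,
 *                                                     MmCached);
 *     if (Buffer) MmFreeContiguousMemorySpecifyCache(Buffer, 0x4000, MmCached);
 */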

/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
                           IN PHYSICAL_ADDRESS HighestAcceptableAddress)
{
    PFN_NUMBER HighestPfn;

    //
    // Verify byte count
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Convert and normalize the highest address into a PFN
    //
    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;

    //
    // Let the contiguous memory allocator handle it
    //
    return MiAllocateContiguousMemory(NumberOfBytes, 0, HighestPfn, 0, MmCached);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreeContiguousMemory(IN PVOID BaseAddress)
{
    //
    // Let the contiguous memory allocator handle it
    //
    MiFreeContiguousMemory(BaseAddress);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreeContiguousMemorySpecifyCache(IN PVOID BaseAddress,
                                   IN SIZE_T NumberOfBytes,
                                   IN MEMORY_CACHING_TYPE CacheType)
{
    //
    // Just call the non-cached version (there are no cache issues when freeing)
    //
    MiFreeContiguousMemory(BaseAddress);
}

/* EOF */