/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/contmem.c
 * PURPOSE:         ARM Memory Manager Contiguous Memory Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::CONTMEM"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* PRIVATE FUNCTIONS **********************************************************/

PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    //
    BoundaryMask = ~(BoundaryPfn - 1);
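    //
    // (e.g. with 4KB pages, BoundaryPfn 0x10 describes a 64KB boundary and
    // yields the mask ~0xF: a run avoids crossing such a boundary iff its
    // first and last PFNs agree on every bit above the low four)
    //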

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run
        //
        Length = 0;
        for (Pfn1 = MiGetPfnEntry(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            //
            if (MiIsPfnInUse(Pfn1)) continue;

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
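            // (XORing the first and last PFN of the would-be run exposes any
            // differing high bits; a nonzero result under BoundaryMask means
            // the run would straddle a BoundaryPfn multiple)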
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages; roll back
                // to the first page of the run
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (MiIsPfnInUse(Pfn1)) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
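                        // (walking backwards, from the last PFN of the run
                        // down to EndPfn, the first)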
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // This PFN is now a used page, set it up
                            //
                            MiUnlinkFreeOrZeroedPage(Pfn1);
                            Pfn1->u3.e2.ReferenceCount = 1;
                            Pfn1->u2.ShareCount = 1;
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;
                            Pfn1->u3.e1.StartOfAllocation = 0;
                            Pfn1->u3.e1.EndOfAllocation = 0;
                            Pfn1->u3.e1.PrototypePte = 0;
                            Pfn1->u4.VerifierAllocation = 0;
                            Pfn1->PteAddress = (PVOID)0xBAADF00D;

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->u3.e1.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MiGetPfnEntry(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page -= SizeInPages - 1;
                        ASSERT(Pfn1 == MiGetPfnEntry(Page));
                        ASSERT(Page != 0);
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, a page changed state before we could acquire
                // the PFN lock, so we'll have to restart
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    return 0;
}

PVOID
NTAPI
MiCheckForContiguousMemory(IN PVOID BaseAddress,
                           IN PFN_NUMBER BaseAddressPages,
                           IN PFN_NUMBER SizeInPages,
                           IN PFN_NUMBER LowestPfn,
                           IN PFN_NUMBER HighestPfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute)
{
    PMMPTE StartPte, EndPte;
    PFN_NUMBER PreviousPage = 0, Page, HighPage, BoundaryMask, Pages = 0;

    //
    // Okay, first of all check if the PFNs match our restrictions
    //
    if (LowestPfn > HighestPfn) return NULL;
    if (LowestPfn + SizeInPages <= LowestPfn) return NULL;
    if (LowestPfn + SizeInPages - 1 > HighestPfn) return NULL;
    if (BaseAddressPages < SizeInPages) return NULL;

    //
    // This is the last page we need to get to and the boundary requested
    //
    HighPage = HighestPfn + 1 - SizeInPages;
    BoundaryMask = ~(BoundaryPfn - 1);
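    // (same alignment-mask trick as in MiFindContiguousPages above)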

    //
    // And here's the PTEs for this allocation. Let's go scan them.
    //
    StartPte = MiAddressToPte(BaseAddress);
    EndPte = StartPte + BaseAddressPages;
    while (StartPte < EndPte)
    {
        //
        // Get this PTE's page number
        //
        ASSERT(StartPte->u.Hard.Valid == 1);
        Page = PFN_FROM_PTE(StartPte);

        //
        // Is this the beginning of our adventure?
        //
        if (!Pages)
        {
            //
            // Check if this PFN is within our range
            //
            if ((Page >= LowestPfn) && (Page <= HighPage))
            {
                //
                // It is! Do you care about boundary (alignment)?
                //
                if (!(BoundaryPfn) ||
                    (!((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
                {
                    //
                    // You don't care, or you do care but we deliver
                    //
                    Pages++;
                }
            }

            //
            // Have we found all the pages we need by now?
            // Incidentally, this means you only wanted one page
            //
            if (Pages == SizeInPages)
            {
                //
                // Mission complete
                //
                return MiPteToAddress(StartPte);
            }
        }
        else
        {
            //
            // Have we found a page that doesn't seem to be contiguous?
            //
            if (Page != (PreviousPage + 1))
            {
                //
                // Ah crap, we have to start over
                //
                Pages = 0;
                continue;
            }

            //
            // Otherwise, we're still in the game. Do we have all our pages?
            //
            if (++Pages == SizeInPages)
            {
                //
                // We do! This entire range was contiguous, so we'll return it!
                //
                return MiPteToAddress(StartPte - Pages + 1);
            }
        }

        //
        // Try with the next PTE, remember this PFN
        //
        PreviousPage = Page;
        StartPte++;
        continue;
    }

    //
    // All good returns are within the loop...
    //
    return NULL;
}

PVOID
NTAPI
MiFindContiguousMemory(IN PFN_NUMBER LowestPfn,
                       IN PFN_NUMBER HighestPfn,
                       IN PFN_NUMBER BoundaryPfn,
                       IN PFN_NUMBER SizeInPages,
                       IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page;
    PHYSICAL_ADDRESS PhysicalAddress;
    PMMPFN Pfn1, EndPfn;
    PMMPTE PointerPte;
    PVOID BaseAddress;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Our last hope is to scan the free page list for contiguous pages
    //
    Page = MiFindContiguousPages(LowestPfn,
                                 HighestPfn,
                                 BoundaryPfn,
                                 SizeInPages,
                                 CacheType);
    if (!Page) return NULL;

    //
    // We'll just piggyback on the I/O memory mapper. Widen the PFN before
    // shifting so physical addresses above 4GB don't get truncated.
    //
    PhysicalAddress.QuadPart = (ULONGLONG)Page << PAGE_SHIFT;
    BaseAddress = MmMapIoSpace(PhysicalAddress, SizeInPages << PAGE_SHIFT, CacheType);
    ASSERT(BaseAddress);

    /* Loop the PFN entries */
    Pfn1 = MiGetPfnEntry(Page);
    EndPfn = Pfn1 + SizeInPages;
    PointerPte = MiAddressToPte(BaseAddress);
    do
    {
        /* Write the PTE address, and the frame of the page table that maps it */
        Pfn1->PteAddress = PointerPte;
        Pfn1->u4.PteFrame = PFN_FROM_PTE(MiAddressToPte(PointerPte++));
    } while (++Pfn1 < EndPfn);

    /* Return the address */
    return BaseAddress;
}

PVOID
NTAPI
MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
                           IN PFN_NUMBER LowestAcceptablePfn,
                           IN PFN_NUMBER HighestAcceptablePfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MEMORY_CACHING_TYPE CacheType)
{
    PVOID BaseAddress;
    PFN_NUMBER SizeInPages;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Verify count and cache type
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(CacheType <= MmWriteCombined);

    //
    // Compute size requested
    //
    SizeInPages = BYTES_TO_PAGES(NumberOfBytes);
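    // (rounds up: e.g. with 4KB pages, a 10,000-byte request becomes 3 pages)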

    //
    // Convert the cache attribute and check for cached requests
    //
    CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    if (CacheAttribute == MiCached)
    {
        //
        // Because initial nonpaged pool is supposed to be contiguous, go ahead
        // and try making a nonpaged pool allocation first.
        //
        BaseAddress = ExAllocatePoolWithTag(NonPagedPoolCacheAligned,
                                            NumberOfBytes,
                                            'mCmM');
        if (BaseAddress)
        {
            //
            // Now make sure it's actually contiguous (if it came from expansion
            // it might not be).
            //
            if (MiCheckForContiguousMemory(BaseAddress,
                                           SizeInPages,
                                           SizeInPages,
                                           LowestAcceptablePfn,
                                           HighestAcceptablePfn,
                                           BoundaryPfn,
                                           CacheAttribute))
            {
                //
                // Sweet, we're in business!
                //
                return BaseAddress;
            }

            //
            // No such luck
            //
            ExFreePool(BaseAddress);
        }
    }

    //
    // According to MSDN, the system won't try anything else if you're higher
    // than APC level.
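    // (the fallback below is paged code and maps I/O space, so it may block)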
    //
    if (KeGetCurrentIrql() > APC_LEVEL) return NULL;

    //
    // Otherwise, we'll go try to find some
    //
    return MiFindContiguousMemory(LowestAcceptablePfn,
                                  HighestAcceptablePfn,
                                  BoundaryPfn,
                                  SizeInPages,
                                  CacheType);
}

VOID
NTAPI
MiFreeContiguousMemory(IN PVOID BaseAddress)
{
    KIRQL OldIrql;
    PFN_NUMBER PageFrameIndex, LastPage, PageCount;
    PMMPFN Pfn1, StartPfn;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // First, check if the memory came from initial nonpaged pool, or expansion
    //
    if (((BaseAddress >= MmNonPagedPoolStart) &&
         (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                MmSizeOfNonPagedPoolInBytes))) ||
        ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
         (BaseAddress < MmNonPagedPoolEnd)))
    {
        //
        // It did, so just use the pool to free this
        //
        ExFreePool(BaseAddress);
        return;
    }

    /* Get the PTE and frame number for the allocation */
    PointerPte = MiAddressToPte(BaseAddress);
    PageFrameIndex = PFN_FROM_PTE(PointerPte);

    //
    // Now get the PFN entry for this, and make sure it's the correct one
    //
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    if ((!Pfn1) || (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // This probably means you did a free on an address that was in between
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     0x60,
                     (ULONG_PTR)BaseAddress,
                     0,
                     0);
    }

    //
    // Now this PFN isn't the start of any allocation anymore, it's going out
    //
    StartPfn = Pfn1;
    Pfn1->u3.e1.StartOfAllocation = 0;

    /* Loop the PFNs until we find the one that marks the end of the allocation */
    do
    {
        /* Make sure these are the pages we set up in the allocation routine */
        ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(Pfn1->PteAddress == PointerPte);
        ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
        ASSERT(Pfn1->u4.VerifierAllocation == 0);
        ASSERT(Pfn1->u3.e1.PrototypePte == 0);

        /* Keep going for assertions */
        PointerPte++;
    } while (Pfn1++->u3.e1.EndOfAllocation == 0);
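    //
    // Note: the post-increment in the loop condition leaves Pfn1 one entry
    // past the page that has EndOfAllocation set
    //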

    //
    // Found it, unmark it
    //
    Pfn1--;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Now compute how many pages this represents
    //
    PageCount = (ULONG)(Pfn1 - StartPfn + 1);

    //
    // So we can know how much to unmap (recall we piggyback on I/O mappings)
    //
    MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the pages
    //
    LastPage = PageFrameIndex + PageCount;
    do
    {
        //
        // Free each one, and move on
        //
        MmReleasePageMemoryConsumer(MC_NPPOOL, PageFrameIndex++);
    } while (PageFrameIndex < LastPage);

    //
    // Release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes,
                                       IN PHYSICAL_ADDRESS LowestAcceptableAddress OPTIONAL,
                                       IN PHYSICAL_ADDRESS HighestAcceptableAddress,
                                       IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL,
                                       IN MEMORY_CACHING_TYPE CacheType OPTIONAL)
{
    PFN_NUMBER LowestPfn, HighestPfn, BoundaryPfn;

    //
    // Verify count and cache type
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(CacheType <= MmWriteCombined);

    //
    // Convert the lowest address into a PFN
    //
    LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) LowestPfn++;
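    // (round up, since a partially-covered first page can't satisfy the bound)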

    //
    // Convert and validate the boundary address into a PFN
    //
    if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) return NULL;
    BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);
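    // (e.g. a 64KB BoundaryAddressMultiple becomes BoundaryPfn 0x10 with 4KB pages)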

    //
    // Convert the highest address into a PFN
    //
    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;

    //
    // Validate the PFN bounds
    //
    if (LowestPfn > HighestPfn) return NULL;

    //
    // Let the contiguous memory allocator handle it
    //
    return MiAllocateContiguousMemory(NumberOfBytes,
                                      LowestPfn,
                                      HighestPfn,
                                      BoundaryPfn,
                                      CacheType);
}
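
//
// Illustrative caller-side sketch (hypothetical, not part of this file):
// a driver requesting 64KB of cached memory below 16MB, aligned to a 64KB
// boundary, could write:
//
//     PHYSICAL_ADDRESS Lowest, Highest, Boundary;
//     PVOID Buffer;
//
//     Lowest.QuadPart = 0;
//     Highest.QuadPart = 0x00FFFFFF;  // stay below 16MB
//     Boundary.QuadPart = 0x10000;    // multiples of 64KB
//     Buffer = MmAllocateContiguousMemorySpecifyCache(0x10000,
//                                                     Lowest,
//                                                     Highest,
//                                                     Boundary,
//                                                     MmCached);
//     if (Buffer) MmFreeContiguousMemorySpecifyCache(Buffer, 0x10000, MmCached);
//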

/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateContiguousMemory(IN ULONG NumberOfBytes,
                           IN PHYSICAL_ADDRESS HighestAcceptableAddress)
{
    PFN_NUMBER HighestPfn;

    //
    // Verify byte count
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Convert and normalize the highest address into a PFN
    //
    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;

    //
    // Let the contiguous memory allocator handle it
    //
    return MiAllocateContiguousMemory(NumberOfBytes, 0, HighestPfn, 0, MmCached);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreeContiguousMemory(IN PVOID BaseAddress)
{
    //
    // Let the contiguous memory allocator handle it
    //
    MiFreeContiguousMemory(BaseAddress);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreeContiguousMemorySpecifyCache(IN PVOID BaseAddress,
                                   IN ULONG NumberOfBytes,
                                   IN MEMORY_CACHING_TYPE CacheType)
{
    //
    // Just call the non-cached version (there are no cache issues when freeing)
    //
    MiFreeContiguousMemory(BaseAddress);
}

/* EOF */