/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/contmem.c
 * PURPOSE:         ARM Memory Manager Contiguous Memory Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* PRIVATE FUNCTIONS **********************************************************/

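/*
 * MiFindContiguousPages scans the physical memory runs for SizeInPages free,
 * physically contiguous pages within [LowestPfn, HighestPfn] that do not
 * cross a multiple of BoundaryPfn. The scan runs optimistically without the
 * PFN lock; once a candidate range is found, it is revalidated and claimed
 * under the PFN lock, and the search restarts if a page was grabbed in the
 * meantime. Returns the first PFN of the range, or 0 on failure.
 */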
PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    //
    BoundaryMask = ~(BoundaryPfn - 1);
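
    //
    // Worked example (illustrative values): with 4KB pages, a 64KB boundary
    // multiple means BoundaryPfn = 0x10, so BoundaryMask = ~0xF. Two PFNs sit
    // in the same boundary-aligned chunk exactly when their masked bits match,
    // i.e. ((PfnA ^ PfnB) & BoundaryMask) == 0 -- the check applied below to
    // the first and last page of each candidate range.
    //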

    /* Disable APCs */
    KeEnterGuardedRegion();

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run
        //
        Length = 0;
        for (Pfn1 = MI_PFN_ELEMENT(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            //
            if (MiIsPfnInUse(Pfn1))
            {
                Length = 0;
                continue;
            }

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages; roll back
                // to the first one
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (MiIsPfnInUse(Pfn1)) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // This PFN is now a used page, set it up
                            //
                            MI_SET_USAGE(MI_USAGE_CONTINOUS_ALLOCATION);
                            MI_SET_PROCESS2("Kernel Driver");
                            MiUnlinkFreeOrZeroedPage(Pfn1);
                            Pfn1->u3.e2.ReferenceCount = 1;
                            Pfn1->u2.ShareCount = 1;
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;
                            Pfn1->u3.e1.StartOfAllocation = 0;
                            Pfn1->u3.e1.EndOfAllocation = 0;
                            Pfn1->u3.e1.PrototypePte = 0;
                            Pfn1->u4.VerifierAllocation = 0;
                            Pfn1->PteAddress = (PVOID)0xBAADF00D;

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->u3.e1.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MI_PFN_ELEMENT(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page = Page - SizeInPages + 1;
                        ASSERT(Pfn1 == MI_PFN_ELEMENT(Page));
                        ASSERT(Page != 0);

                        /* Enable APCs and return the page */
                        KeLeaveGuardedRegion();
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm,
                    // now that the PFN lock is held, that these pages are
                    // still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed before we could acquire
                // the PFN lock, so we'll have to restart
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    return 0;
}

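/*
 * MiCheckForContiguousMemory walks the (valid) PTEs mapping BaseAddress and
 * checks whether some window of SizeInPages of them is backed by physically
 * contiguous pages satisfying the [LowestPfn, HighestPfn] range and the
 * BoundaryPfn restriction. Returns the virtual address of the first page of
 * such a window, or NULL if none exists.
 */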
PVOID
NTAPI
MiCheckForContiguousMemory(IN PVOID BaseAddress,
                           IN PFN_NUMBER BaseAddressPages,
                           IN PFN_NUMBER SizeInPages,
                           IN PFN_NUMBER LowestPfn,
                           IN PFN_NUMBER HighestPfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute)
{
    PMMPTE StartPte, EndPte;
    PFN_NUMBER PreviousPage = 0, Page, HighPage, BoundaryMask, Pages = 0;

    //
    // Okay, first of all check if the PFNs match our restrictions
    //
    if (LowestPfn > HighestPfn) return NULL;
    if (LowestPfn + SizeInPages <= LowestPfn) return NULL;
    if (LowestPfn + SizeInPages - 1 > HighestPfn) return NULL;
    if (BaseAddressPages < SizeInPages) return NULL;

    //
    // This is the last page we need to get to and the boundary requested
    //
    HighPage = HighestPfn + 1 - SizeInPages;
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // And here are the PTEs for this allocation. Let's go scan them.
    //
    StartPte = MiAddressToPte(BaseAddress);
    EndPte = StartPte + BaseAddressPages;
    while (StartPte < EndPte)
    {
        //
        // Get this PTE's page number
        //
        ASSERT(StartPte->u.Hard.Valid == 1);
        Page = PFN_FROM_PTE(StartPte);

        //
        // Is this the beginning of our adventure?
        //
        if (!Pages)
        {
            //
            // Check if this PFN is within our range
            //
            if ((Page >= LowestPfn) && (Page <= HighPage))
            {
                //
                // It is! Do you care about boundary (alignment)?
                //
                if (!(BoundaryPfn) ||
                    (!((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
                {
                    //
                    // You don't care, or you do care but we deliver
                    //
                    Pages++;
                }
            }

            //
            // Have we found all the pages we need by now?
            // Incidentally, this means you only wanted one page
            //
            if (Pages == SizeInPages)
            {
                //
                // Mission complete
                //
                return MiPteToAddress(StartPte);
            }
        }
        else
        {
            //
            // Have we found a page that doesn't seem to be contiguous?
            //
            if (Page != (PreviousPage + 1))
            {
                //
                // Ah crap, we have to start over
                //
                Pages = 0;
                continue;
            }

            //
            // Otherwise, we're still in the game. Do we have all our pages?
            //
            if (++Pages == SizeInPages)
            {
                //
                // We do! This entire range was contiguous, so we'll return it!
                //
                return MiPteToAddress(StartPte - Pages + 1);
            }
        }

        //
        // Try with the next PTE, remember this PFN
        //
        PreviousPage = Page;
        StartPte++;
        continue;
    }

    //
    // All good returns are within the loop...
    //
    return NULL;
}

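/*
 * MiFindContiguousMemory grabs SizeInPages contiguous physical pages via
 * MiFindContiguousPages, maps them through the I/O space mapper, and stamps
 * each PFN entry with the PTE that now maps it. Returns the new virtual
 * address, or NULL if no contiguous run could be found.
 */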
PVOID
NTAPI
MiFindContiguousMemory(IN PFN_NUMBER LowestPfn,
                       IN PFN_NUMBER HighestPfn,
                       IN PFN_NUMBER BoundaryPfn,
                       IN PFN_NUMBER SizeInPages,
                       IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page;
    PHYSICAL_ADDRESS PhysicalAddress;
    PMMPFN Pfn1, EndPfn;
    PMMPTE PointerPte;
    PVOID BaseAddress;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Our last hope is to scan the free page list for contiguous pages
    //
    Page = MiFindContiguousPages(LowestPfn,
                                 HighestPfn,
                                 BoundaryPfn,
                                 SizeInPages,
                                 CacheType);
    if (!Page) return NULL;

    //
    // We'll just piggyback on the I/O memory mapper (widen the PFN before
    // shifting so pages above 4GB don't get truncated on 32-bit builds)
    //
    PhysicalAddress.QuadPart = (ULONGLONG)Page << PAGE_SHIFT;
    BaseAddress = MmMapIoSpace(PhysicalAddress, SizeInPages << PAGE_SHIFT, CacheType);
    ASSERT(BaseAddress);

    /* Loop the PFN entries */
    Pfn1 = MiGetPfnEntry(Page);
    EndPfn = Pfn1 + SizeInPages;
    PointerPte = MiAddressToPte(BaseAddress);
    do
    {
        /* Write the PTE address */
        Pfn1->PteAddress = PointerPte;
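        /*
         * Note: MiAddressToPte(PointerPte) resolves the PTE that maps
         * PointerPte itself, i.e. the page-table page, so PteFrame ends up
         * holding the physical page that contains this mapping's PTE.
         */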
        Pfn1->u4.PteFrame = PFN_FROM_PTE(MiAddressToPte(PointerPte++));
    } while (++Pfn1 < EndPfn);

    /* Return the address */
    return BaseAddress;
}

PVOID
NTAPI
MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
                           IN PFN_NUMBER LowestAcceptablePfn,
                           IN PFN_NUMBER HighestAcceptablePfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MEMORY_CACHING_TYPE CacheType)
{
    PVOID BaseAddress;
    PFN_NUMBER SizeInPages;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Verify count and cache type
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(CacheType <= MmWriteCombined);

    //
    // Compute size requested
    //
    SizeInPages = BYTES_TO_PAGES(NumberOfBytes);

    //
    // Convert the cache attribute and check for cached requests
    //
    CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    if (CacheAttribute == MiCached)
    {
        //
        // Because initial nonpaged pool is supposed to be contiguous, go ahead
        // and try making a nonpaged pool allocation first.
        //
        BaseAddress = ExAllocatePoolWithTag(NonPagedPoolCacheAligned,
                                            NumberOfBytes,
                                            'mCmM');
        if (BaseAddress)
        {
            //
            // Now make sure it's actually contiguous (if it came from expansion
            // it might not be).
            //
            if (MiCheckForContiguousMemory(BaseAddress,
                                           SizeInPages,
                                           SizeInPages,
                                           LowestAcceptablePfn,
                                           HighestAcceptablePfn,
                                           BoundaryPfn,
                                           CacheAttribute))
            {
                //
                // Sweet, we're in business!
                //
                return BaseAddress;
            }

            //
            // No such luck
            //
            ExFreePoolWithTag(BaseAddress, 'mCmM');
        }
    }

    //
    // According to MSDN, the system won't try anything else if the caller is
    // running above APC_LEVEL.
    //
    if (KeGetCurrentIrql() > APC_LEVEL) return NULL;

    //
    // Otherwise, we'll go try to find some
    //
    return MiFindContiguousMemory(LowestAcceptablePfn,
                                  HighestAcceptablePfn,
                                  BoundaryPfn,
                                  SizeInPages,
                                  CacheType);
}

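/*
 * MiFreeContiguousMemory releases an allocation made by the routines above:
 * pool-backed allocations go straight back to the pool, while mapped ones are
 * walked from the StartOfAllocation PFN to the EndOfAllocation PFN, unmapped
 * from I/O space, and have their share counts dropped under the PFN lock.
 */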
VOID
NTAPI
MiFreeContiguousMemory(IN PVOID BaseAddress)
{
    KIRQL OldIrql;
    PFN_NUMBER PageFrameIndex, LastPage, PageCount;
    PMMPFN Pfn1, StartPfn;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // First, check if the memory came from initial nonpaged pool, or expansion
    //
    if (((BaseAddress >= MmNonPagedPoolStart) &&
         (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                MmSizeOfNonPagedPoolInBytes))) ||
        ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
         (BaseAddress < MmNonPagedPoolEnd)))
    {
        //
        // It did, so just use the pool to free this
        //
        ExFreePoolWithTag(BaseAddress, 'mCmM');
        return;
    }

    /* Get the PTE and frame number for the allocation */
    PointerPte = MiAddressToPte(BaseAddress);
    PageFrameIndex = PFN_FROM_PTE(PointerPte);

    //
    // Now get the PFN entry for this, and make sure it's the correct one
    //
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    if ((!Pfn1) || (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // This probably means you freed an address somewhere in the middle
        // of an allocation
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     0x60,
                     (ULONG_PTR)BaseAddress,
                     0,
                     0);
    }

    //
    // Now this PFN isn't the start of any allocation anymore, it's going out
    //
    StartPfn = Pfn1;
    Pfn1->u3.e1.StartOfAllocation = 0;

    /* Loop the PFNs until we find the one that marks the end of the allocation */
    do
    {
        /* Make sure these are the pages we set up in the allocation routine */
        ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(Pfn1->PteAddress == PointerPte);
        ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
        ASSERT(Pfn1->u4.VerifierAllocation == 0);
        ASSERT(Pfn1->u3.e1.PrototypePte == 0);

        /* Set the special pending delete marker */
        MI_SET_PFN_DELETED(Pfn1);

        /* Keep going for assertions */
        PointerPte++;
    } while (Pfn1++->u3.e1.EndOfAllocation == 0);
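    /*
     * Note: the post-increment above leaves Pfn1 pointing one entry past the
     * page that had EndOfAllocation set, hence the decrement below.
     */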

    //
    // Found it, unmark it
    //
    Pfn1--;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Now compute how many pages this represents
    //
    PageCount = (ULONG)(Pfn1 - StartPfn + 1);

    //
    // So we can know how much to unmap (recall we piggyback on I/O mappings)
    //
    MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the pages
    //
    LastPage = PageFrameIndex + PageCount;
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    do
    {
        /* Decrement the share count and move on */
        MiDecrementShareCount(Pfn1++, PageFrameIndex++);
    } while (PageFrameIndex < LastPage);

    //
    // Release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes,
                                       IN PHYSICAL_ADDRESS LowestAcceptableAddress OPTIONAL,
                                       IN PHYSICAL_ADDRESS HighestAcceptableAddress,
                                       IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL,
                                       IN MEMORY_CACHING_TYPE CacheType OPTIONAL)
{
    PFN_NUMBER LowestPfn, HighestPfn, BoundaryPfn;

    //
    // Verify count and cache type
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(CacheType <= MmWriteCombined);

    //
    // Convert the lowest address into a PFN
    //
    LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) LowestPfn++;

    //
    // Convert and validate the boundary address into a PFN
    //
    if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) return NULL;
    BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);

    //
    // Convert the highest address into a PFN
    //
    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;

    //
    // Validate the PFN bounds
    //
    if (LowestPfn > HighestPfn) return NULL;

    //
    // Let the contiguous memory allocator handle it
    //
    return MiAllocateContiguousMemory(NumberOfBytes,
                                      LowestPfn,
                                      HighestPfn,
                                      BoundaryPfn,
                                      CacheType);
}
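
/*
 * Hypothetical usage sketch (illustrative values, not part of this file):
 * a driver requesting a 64KB buffer below 16MB that must not cross a 64KB
 * boundary, e.g. for legacy DMA:
 *
 *     PHYSICAL_ADDRESS Lowest, Highest, Boundary;
 *     PVOID Buffer;
 *
 *     Lowest.QuadPart = 0;
 *     Highest.QuadPart = 0xFFFFFF;
 *     Boundary.QuadPart = 0x10000;
 *     Buffer = MmAllocateContiguousMemorySpecifyCache(0x10000,
 *                                                     Lowest,
 *                                                     Highest,
 *                                                     Boundary,
 *                                                     MmNonCached);
 *     if (Buffer) MmFreeContiguousMemorySpecifyCache(Buffer, 0x10000, MmNonCached);
 */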

/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
                           IN PHYSICAL_ADDRESS HighestAcceptableAddress)
{
    PFN_NUMBER HighestPfn;

    //
    // Verify byte count
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Convert and normalize the highest address into a PFN
    //
    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;

    //
    // Let the contiguous memory allocator handle it
    //
    return MiAllocateContiguousMemory(NumberOfBytes, 0, HighestPfn, 0, MmCached);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreeContiguousMemory(IN PVOID BaseAddress)
{
    //
    // Let the contiguous memory allocator handle it
    //
    MiFreeContiguousMemory(BaseAddress);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreeContiguousMemorySpecifyCache(IN PVOID BaseAddress,
                                   IN SIZE_T NumberOfBytes,
                                   IN MEMORY_CACHING_TYPE CacheType)
{
    //
    // Just call the non-cached version (there are no cache issues when freeing)
    //
    MiFreeContiguousMemory(BaseAddress);
}

/* EOF */