[NTOS:MM] Use inline functions to acquire/release the PFN lock.
[reactos.git] / ntoskrnl / mm / ARM3 / contmem.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/contmem.c
5 * PURPOSE: ARM Memory Manager Contiguous Memory Allocator
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17
18 /* PRIVATE FUNCTIONS **********************************************************/
19
/*
 * Scans the physical memory runs for a block of SizeInPages free,
 * physically contiguous page frames within [LowestPfn, HighestPfn],
 * optionally aligned so the run does not cross a BoundaryPfn multiple.
 *
 * Returns the first PFN of the claimed run (pages are unlinked from the
 * free/zeroed lists and marked ActiveAndValid), or 0 if no run was found.
 *
 * NOTE(review): CacheType is accepted but never referenced in this body.
 */
PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    // (BoundaryPfn is assumed to be a power of two; 0 means "no boundary",
    // and the mask is only consulted when BoundaryPfn is nonzero)
    //
    BoundaryMask = ~(BoundaryPfn - 1);

    /* Disable APCs so we can't be suspended mid-scan */
    KeEnterGuardedRegion();

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        // NOTE(review): PageCount here is the run's untrimmed length, not the
        // trimmed [Page, LastPage) window — confirm this is intentional
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run, counting consecutive
        // free pages in Length (unlocked first pass; re-verified under the
        // PFN lock below before anything is claimed)
        //
        Length = 0;
        for (Pfn1 = MI_PFN_ELEMENT(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it and restart the streak
            //
            if (MiIsPfnInUse(Pfn1))
            {
                Length = 0;
                continue;
            }

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            // (XOR of first/last PFN has a bit above the boundary if the run
            // would cross a BoundaryPfn multiple)
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, rollback
                // to the first page of the candidate run
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = MiAcquirePfnLock();
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (MiIsPfnInUse(Pfn1)) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        // (walking backwards from the last page of the run)
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // This PFN is now a used page, set it up
                            //
                            MI_SET_USAGE(MI_USAGE_CONTINOUS_ALLOCATION);
                            MI_SET_PROCESS2("Kernel Driver");
                            MiUnlinkFreeOrZeroedPage(Pfn1);
                            Pfn1->u3.e2.ReferenceCount = 1;
                            Pfn1->u2.ShareCount = 1;
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;
                            Pfn1->u3.e1.StartOfAllocation = 0;
                            Pfn1->u3.e1.EndOfAllocation = 0;
                            Pfn1->u3.e1.PrototypePte = 0;
                            Pfn1->u4.VerifierAllocation = 0;
                            // Placeholder; the real PTE address is written by
                            // the caller (see MiFindContiguousMemory)
                            Pfn1->PteAddress = (PVOID)0xBAADF00D;

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        // (MiFreeContiguousMemory relies on these flags)
                        //
                        Pfn1->u3.e1.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        MiReleasePfnLock(OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MI_PFN_ELEMENT(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page = Page - SizeInPages + 1;
                        ASSERT(Pfn1 == MI_PFN_ELEMENT(Page));
                        ASSERT(Page != 0);

                        /* Enable APCs and return the page */
                        KeLeaveGuardedRegion();
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed while we hadn't acquired
                // the PFN lock yet, so we'll have to restart
                //
                MiReleasePfnLock(OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    KeLeaveGuardedRegion();
    return 0;
}
211
212 PVOID
213 NTAPI
214 MiCheckForContiguousMemory(IN PVOID BaseAddress,
215 IN PFN_NUMBER BaseAddressPages,
216 IN PFN_NUMBER SizeInPages,
217 IN PFN_NUMBER LowestPfn,
218 IN PFN_NUMBER HighestPfn,
219 IN PFN_NUMBER BoundaryPfn,
220 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute)
221 {
222 PMMPTE StartPte, EndPte;
223 PFN_NUMBER PreviousPage = 0, Page, HighPage, BoundaryMask, Pages = 0;
224
225 //
226 // Okay, first of all check if the PFNs match our restrictions
227 //
228 if (LowestPfn > HighestPfn) return NULL;
229 if (LowestPfn + SizeInPages <= LowestPfn) return NULL;
230 if (LowestPfn + SizeInPages - 1 > HighestPfn) return NULL;
231 if (BaseAddressPages < SizeInPages) return NULL;
232
233 //
234 // This is the last page we need to get to and the boundary requested
235 //
236 HighPage = HighestPfn + 1 - SizeInPages;
237 BoundaryMask = ~(BoundaryPfn - 1);
238
239 //
240 // And here's the PTEs for this allocation. Let's go scan them.
241 //
242 StartPte = MiAddressToPte(BaseAddress);
243 EndPte = StartPte + BaseAddressPages;
244 while (StartPte < EndPte)
245 {
246 //
247 // Get this PTE's page number
248 //
249 ASSERT (StartPte->u.Hard.Valid == 1);
250 Page = PFN_FROM_PTE(StartPte);
251
252 //
253 // Is this the beginning of our adventure?
254 //
255 if (!Pages)
256 {
257 //
258 // Check if this PFN is within our range
259 //
260 if ((Page >= LowestPfn) && (Page <= HighPage))
261 {
262 //
263 // It is! Do you care about boundary (alignment)?
264 //
265 if (!(BoundaryPfn) ||
266 (!((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
267 {
268 //
269 // You don't care, or you do care but we deliver
270 //
271 Pages++;
272 }
273 }
274
275 //
276 // Have we found all the pages we need by now?
277 // Incidently, this means you only wanted one page
278 //
279 if (Pages == SizeInPages)
280 {
281 //
282 // Mission complete
283 //
284 return MiPteToAddress(StartPte);
285 }
286 }
287 else
288 {
289 //
290 // Have we found a page that doesn't seem to be contiguous?
291 //
292 if (Page != (PreviousPage + 1))
293 {
294 //
295 // Ah crap, we have to start over
296 //
297 Pages = 0;
298 continue;
299 }
300
301 //
302 // Otherwise, we're still in the game. Do we have all our pages?
303 //
304 if (++Pages == SizeInPages)
305 {
306 //
307 // We do! This entire range was contiguous, so we'll return it!
308 //
309 return MiPteToAddress(StartPte - Pages + 1);
310 }
311 }
312
313 //
314 // Try with the next PTE, remember this PFN
315 //
316 PreviousPage = Page;
317 StartPte++;
318 continue;
319 }
320
321 //
322 // All good returns are within the loop...
323 //
324 return NULL;
325 }
326
327 PVOID
328 NTAPI
329 MiFindContiguousMemory(IN PFN_NUMBER LowestPfn,
330 IN PFN_NUMBER HighestPfn,
331 IN PFN_NUMBER BoundaryPfn,
332 IN PFN_NUMBER SizeInPages,
333 IN MEMORY_CACHING_TYPE CacheType)
334 {
335 PFN_NUMBER Page;
336 PHYSICAL_ADDRESS PhysicalAddress;
337 PMMPFN Pfn1, EndPfn;
338 PMMPTE PointerPte;
339 PVOID BaseAddress;
340 PAGED_CODE();
341 ASSERT(SizeInPages != 0);
342
343 //
344 // Our last hope is to scan the free page list for contiguous pages
345 //
346 Page = MiFindContiguousPages(LowestPfn,
347 HighestPfn,
348 BoundaryPfn,
349 SizeInPages,
350 CacheType);
351 if (!Page) return NULL;
352
353 //
354 // We'll just piggyback on the I/O memory mapper
355 //
356 PhysicalAddress.QuadPart = Page << PAGE_SHIFT;
357 BaseAddress = MmMapIoSpace(PhysicalAddress, SizeInPages << PAGE_SHIFT, CacheType);
358 ASSERT(BaseAddress);
359
360 /* Loop the PFN entries */
361 Pfn1 = MiGetPfnEntry(Page);
362 EndPfn = Pfn1 + SizeInPages;
363 PointerPte = MiAddressToPte(BaseAddress);
364 do
365 {
366 /* Write the PTE address */
367 Pfn1->PteAddress = PointerPte;
368 Pfn1->u4.PteFrame = PFN_FROM_PTE(MiAddressToPte(PointerPte++));
369 } while (++Pfn1 < EndPfn);
370
371 /* Return the address */
372 return BaseAddress;
373 }
374
375 PVOID
376 NTAPI
377 MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
378 IN PFN_NUMBER LowestAcceptablePfn,
379 IN PFN_NUMBER HighestAcceptablePfn,
380 IN PFN_NUMBER BoundaryPfn,
381 IN MEMORY_CACHING_TYPE CacheType)
382 {
383 PVOID BaseAddress;
384 PFN_NUMBER SizeInPages;
385 MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
386
387 //
388 // Verify count and cache type
389 //
390 ASSERT(NumberOfBytes != 0);
391 ASSERT(CacheType <= MmWriteCombined);
392
393 //
394 // Compute size requested
395 //
396 SizeInPages = BYTES_TO_PAGES(NumberOfBytes);
397
398 //
399 // Convert the cache attribute and check for cached requests
400 //
401 CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
402 if (CacheAttribute == MiCached)
403 {
404 //
405 // Because initial nonpaged pool is supposed to be contiguous, go ahead
406 // and try making a nonpaged pool allocation first.
407 //
408 BaseAddress = ExAllocatePoolWithTag(NonPagedPoolCacheAligned,
409 NumberOfBytes,
410 'mCmM');
411 if (BaseAddress)
412 {
413 //
414 // Now make sure it's actually contiguous (if it came from expansion
415 // it might not be).
416 //
417 if (MiCheckForContiguousMemory(BaseAddress,
418 SizeInPages,
419 SizeInPages,
420 LowestAcceptablePfn,
421 HighestAcceptablePfn,
422 BoundaryPfn,
423 CacheAttribute))
424 {
425 //
426 // Sweet, we're in business!
427 //
428 return BaseAddress;
429 }
430
431 //
432 // No such luck
433 //
434 ExFreePoolWithTag(BaseAddress, 'mCmM');
435 }
436 }
437
438 //
439 // According to MSDN, the system won't try anything else if you're higher
440 // than APC level.
441 //
442 if (KeGetCurrentIrql() > APC_LEVEL) return NULL;
443
444 //
445 // Otherwise, we'll go try to find some
446 //
447 return MiFindContiguousMemory(LowestAcceptablePfn,
448 HighestAcceptablePfn,
449 BoundaryPfn,
450 SizeInPages,
451 CacheType);
452 }
453
/*
 * Frees a contiguous allocation made by MiAllocateContiguousMemory.
 * Pool-backed allocations are simply returned to the pool; page-list
 * allocations are located via the StartOfAllocation/EndOfAllocation PFN
 * flags, unmapped, and their share counts dropped.
 *
 * Bugchecks (BAD_POOL_CALLER, 0x60) if BaseAddress is not the start of a
 * contiguous allocation.
 */
VOID
NTAPI
MiFreeContiguousMemory(IN PVOID BaseAddress)
{
    KIRQL OldIrql;
    PFN_NUMBER PageFrameIndex, LastPage, PageCount;
    PMMPFN Pfn1, StartPfn;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // First, check if the memory came from initial nonpaged pool, or expansion
    //
    if (((BaseAddress >= MmNonPagedPoolStart) &&
         (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                MmSizeOfNonPagedPoolInBytes))) ||
        ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
         (BaseAddress < MmNonPagedPoolEnd)))
    {
        //
        // It did, so just use the pool to free this
        //
        ExFreePoolWithTag(BaseAddress, 'mCmM');
        return;
    }

    /* Get the PTE and frame number for the allocation */
    PointerPte = MiAddressToPte(BaseAddress);
    PageFrameIndex = PFN_FROM_PTE(PointerPte);

    //
    // Now get the PFN entry for this, and make sure it's the correct one
    // (the first page of an allocation carries the StartOfAllocation flag)
    //
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    if ((!Pfn1) || (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // This probably means you did a free on an address that was in between
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     0x60,
                     (ULONG_PTR)BaseAddress,
                     0,
                     0);
    }

    //
    // Now this PFN isn't the start of any allocation anymore, it's going out
    //
    StartPfn = Pfn1;
    Pfn1->u3.e1.StartOfAllocation = 0;

    /* Loop the PFNs until we find the one that marks the end of the allocation.
       Note the loop condition: it tests EndOfAllocation on the CURRENT entry,
       then post-increments Pfn1, so Pfn1 ends up one past the last page. */
    do
    {
        /* Make sure these are the pages we setup in the allocation routine */
        ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(Pfn1->PteAddress == PointerPte);
        ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
        ASSERT(Pfn1->u4.VerifierAllocation == 0);
        ASSERT(Pfn1->u3.e1.PrototypePte == 0);

        /* Set the special pending delete marker */
        MI_SET_PFN_DELETED(Pfn1);

        /* Keep going for assertions */
        PointerPte++;
    } while (Pfn1++->u3.e1.EndOfAllocation == 0);

    //
    // Found it, unmark it (step back to the actual last page first)
    //
    Pfn1--;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Now compute how many pages this represents
    //
    PageCount = (ULONG)(Pfn1 - StartPfn + 1);

    //
    // So we can know how much to unmap (recall we piggyback on I/O mappings)
    //
    MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);

    //
    // Lock the PFN database
    //
    OldIrql = MiAcquirePfnLock();

    //
    // Loop all the pages
    //
    LastPage = PageFrameIndex + PageCount;
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    do
    {
        /* Decrement the share count and move on */
        MiDecrementShareCount(Pfn1++, PageFrameIndex++);
    } while (PageFrameIndex < LastPage);

    //
    // Release the PFN lock
    //
    MiReleasePfnLock(OldIrql);
}
561
562 /* PUBLIC FUNCTIONS ***********************************************************/
563
564 /*
565 * @implemented
566 */
567 PVOID
568 NTAPI
569 MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes,
570 IN PHYSICAL_ADDRESS LowestAcceptableAddress OPTIONAL,
571 IN PHYSICAL_ADDRESS HighestAcceptableAddress,
572 IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL,
573 IN MEMORY_CACHING_TYPE CacheType OPTIONAL)
574 {
575 PFN_NUMBER LowestPfn, HighestPfn, BoundaryPfn;
576
577 //
578 // Verify count and cache type
579 //
580 ASSERT(NumberOfBytes != 0);
581 ASSERT(CacheType <= MmWriteCombined);
582
583 //
584 // Convert the lowest address into a PFN
585 //
586 LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
587 if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) LowestPfn++;
588
589 //
590 // Convert and validate the boundary address into a PFN
591 //
592 if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) return NULL;
593 BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);
594
595 //
596 // Convert the highest address into a PFN
597 //
598 HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
599 if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;
600
601 //
602 // Validate the PFN bounds
603 //
604 if (LowestPfn > HighestPfn) return NULL;
605
606 //
607 // Let the contiguous memory allocator handle it
608 //
609 return MiAllocateContiguousMemory(NumberOfBytes,
610 LowestPfn,
611 HighestPfn,
612 BoundaryPfn,
613 CacheType);
614 }
615
616 /*
617 * @implemented
618 */
619 PVOID
620 NTAPI
621 MmAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
622 IN PHYSICAL_ADDRESS HighestAcceptableAddress)
623 {
624 PFN_NUMBER HighestPfn;
625
626 //
627 // Verify byte count
628 //
629 ASSERT(NumberOfBytes != 0);
630
631 //
632 // Convert and normalize the highest address into a PFN
633 //
634 HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
635 if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;
636
637 //
638 // Let the contiguous memory allocator handle it
639 //
640 return MiAllocateContiguousMemory(NumberOfBytes, 0, HighestPfn, 0, MmCached);
641 }
642
643 /*
644 * @implemented
645 */
646 VOID
647 NTAPI
648 MmFreeContiguousMemory(IN PVOID BaseAddress)
649 {
650 //
651 // Let the contiguous memory allocator handle it
652 //
653 MiFreeContiguousMemory(BaseAddress);
654 }
655
656 /*
657 * @implemented
658 */
659 VOID
660 NTAPI
661 MmFreeContiguousMemorySpecifyCache(IN PVOID BaseAddress,
662 IN SIZE_T NumberOfBytes,
663 IN MEMORY_CACHING_TYPE CacheType)
664 {
665 //
666 // Just call the non-cached version (there's no cache issues for freeing)
667 //
668 MiFreeContiguousMemory(BaseAddress);
669 }
670
671 /* EOF */