/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/contmem.c
 * PURPOSE:         ARM Memory Manager Contiguous Memory Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

/* PRIVATE FUNCTIONS **********************************************************/

PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    //
    BoundaryMask = ~(BoundaryPfn - 1);
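
    //
    // For illustration: with 4KB pages, a 64KB boundary means BoundaryPfn is
    // 0x10, so BoundaryMask is ~0xF. Two PFNs lie within the same 64KB-aligned
    // chunk exactly when ((PfnA ^ PfnB) & BoundaryMask) == 0, which is the
    // check applied during the scan below. A BoundaryPfn of 0 (no boundary)
    // wraps around to a mask of 0, so that check always passes.
    //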

    /* Disable APCs */
    KeEnterGuardedRegion();

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run
        //
        Length = 0;
        for (Pfn1 = MI_PFN_ELEMENT(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            //
            if (MiIsPfnInUse(Pfn1))
            {
                Length = 0;
                continue;
            }

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, roll back
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = MiAcquirePfnLock();
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (MiIsPfnInUse(Pfn1)) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // This PFN is now a used page, set it up
                            //
                            MI_SET_USAGE(MI_USAGE_CONTINOUS_ALLOCATION);
                            MI_SET_PROCESS2("Kernel Driver");
                            MiUnlinkFreeOrZeroedPage(Pfn1);
                            Pfn1->u3.e2.ReferenceCount = 1;
                            Pfn1->u2.ShareCount = 1;
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;
                            Pfn1->u3.e1.StartOfAllocation = 0;
                            Pfn1->u3.e1.EndOfAllocation = 0;
                            Pfn1->u3.e1.PrototypePte = 0;
                            Pfn1->u4.VerifierAllocation = 0;
                            Pfn1->PteAddress = (PVOID)(ULONG_PTR)0xBAADF00DBAADF00DULL;

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->u3.e1.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        MiReleasePfnLock(OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MI_PFN_ELEMENT(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page = Page - SizeInPages + 1;
                        ASSERT(Pfn1 == MI_PFN_ELEMENT(Page));
                        ASSERT(Page != 0);

                        /* Enable APCs and return the page */
                        KeLeaveGuardedRegion();
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed while we hadn't acquired
                // the PFN lock yet, so we'll have to restart
                //
                MiReleasePfnLock(OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    KeLeaveGuardedRegion();
    return 0;
}
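
//
// Note on the scheme above: the first scan runs without the PFN lock held, so
// it is only a hint; a page seen as free can be claimed by another thread
// before the lock is taken. The inner do/while therefore re-verifies every
// page of the candidate run under the lock and claims it on the spot, and the
// outer scan resumes if any page was stolen in the meantime. As an
// illustrative call, MiFindContiguousPages(0, MmHighestPhysicalPage, 0x10, 4,
// MmCached) requests 4 contiguous free pages that don't cross a 64KB boundary
// (with 4KB pages).
//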

PVOID
NTAPI
MiCheckForContiguousMemory(IN PVOID BaseAddress,
                           IN PFN_NUMBER BaseAddressPages,
                           IN PFN_NUMBER SizeInPages,
                           IN PFN_NUMBER LowestPfn,
                           IN PFN_NUMBER HighestPfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute)
{
    PMMPTE StartPte, EndPte;
    PFN_NUMBER PreviousPage = 0, Page, HighPage, BoundaryMask, Pages = 0;

    //
    // Okay, first of all check if the PFNs match our restrictions
    //
    if (LowestPfn > HighestPfn) return NULL;
    if (LowestPfn + SizeInPages <= LowestPfn) return NULL;
    if (LowestPfn + SizeInPages - 1 > HighestPfn) return NULL;
    if (BaseAddressPages < SizeInPages) return NULL;

    //
    // This is the last page we need to get to and the boundary requested
    //
    HighPage = HighestPfn + 1 - SizeInPages;
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // And here are the PTEs for this allocation. Let's go scan them.
    //
    StartPte = MiAddressToPte(BaseAddress);
    EndPte = StartPte + BaseAddressPages;
    while (StartPte < EndPte)
    {
        //
        // Get this PTE's page number
        //
        ASSERT(StartPte->u.Hard.Valid == 1);
        Page = PFN_FROM_PTE(StartPte);

        //
        // Is this the beginning of our adventure?
        //
        if (!Pages)
        {
            //
            // Check if this PFN is within our range
            //
            if ((Page >= LowestPfn) && (Page <= HighPage))
            {
                //
                // It is! Do you care about boundary (alignment)?
                //
                if (!(BoundaryPfn) ||
                    (!((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
                {
                    //
                    // You don't care, or you do care but we deliver
                    //
                    Pages++;
                }
            }

            //
            // Have we found all the pages we need by now?
            // Incidentally, this means you only wanted one page
            //
            if (Pages == SizeInPages)
            {
                //
                // Mission complete
                //
                return MiPteToAddress(StartPte);
            }
        }
        else
        {
            //
            // Have we found a page that doesn't seem to be contiguous?
            //
            if (Page != (PreviousPage + 1))
            {
                //
                // Ah crap, we have to start over
                //
                Pages = 0;
                continue;
            }

            //
            // Otherwise, we're still in the game. Do we have all our pages?
            //
            if (++Pages == SizeInPages)
            {
                //
                // We do! This entire range was contiguous, so we'll return it!
                //
                return MiPteToAddress(StartPte - Pages + 1);
            }
        }

        //
        // Try with the next PTE, remember this PFN
        //
        PreviousPage = Page;
        StartPte++;
        continue;
    }

    //
    // All good returns are within the loop...
    //
    return NULL;
}
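
//
// Worked example for the scan above: say BaseAddressPages is 6, SizeInPages
// is 3, LowestPfn is 0x100, HighestPfn is 0xFFF, no boundary, and the six
// PTEs map PFNs 0x200, 0x201, 0x202, 0x5A0, 0x5A1, 0x5A2. The first PTE
// passes the range check (HighPage is 0xFFD), Pages reaches 3 at the third
// PTE, and the address mapped by the first PTE of the run is returned. Had
// the third PFN been 0x250 instead, the Page != PreviousPage + 1 test would
// reset Pages to 0 and the scan would restart from that same PTE.
//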

PVOID
NTAPI
MiFindContiguousMemory(IN PFN_NUMBER LowestPfn,
                       IN PFN_NUMBER HighestPfn,
                       IN PFN_NUMBER BoundaryPfn,
                       IN PFN_NUMBER SizeInPages,
                       IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page;
    PHYSICAL_ADDRESS PhysicalAddress;
    PMMPFN Pfn1, EndPfn;
    PMMPTE PointerPte;
    PVOID BaseAddress;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Our last hope is to scan the free page list for contiguous pages
    //
    Page = MiFindContiguousPages(LowestPfn,
                                 HighestPfn,
                                 BoundaryPfn,
                                 SizeInPages,
                                 CacheType);
    if (!Page) return NULL;

    //
    // We'll just piggyback on the I/O memory mapper. Widen before shifting so
    // a PFN above the 4GB mark isn't truncated on 32-bit builds.
    //
    PhysicalAddress.QuadPart = (ULONGLONG)Page << PAGE_SHIFT;
    BaseAddress = MmMapIoSpace(PhysicalAddress, SizeInPages << PAGE_SHIFT, CacheType);
    ASSERT(BaseAddress);

    /* Loop the PFN entries */
    Pfn1 = MiGetPfnEntry(Page);
    EndPfn = Pfn1 + SizeInPages;
    PointerPte = MiAddressToPte(BaseAddress);
    do
    {
        /* Write the PTE address */
        Pfn1->PteAddress = PointerPte;
        Pfn1->u4.PteFrame = PFN_FROM_PTE(MiAddressToPte(PointerPte++));
    } while (++Pfn1 < EndPfn);

    /* Return the address */
    return BaseAddress;
}
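
//
// A note on the fixup loop above: MmMapIoSpace built the mapping, but the PFN
// entries still hold the placeholder PteAddress written by
// MiFindContiguousPages. Each entry is repointed at its real system PTE, and
// u4.PteFrame is set to the PFN of the page-table page holding that PTE
// (found by resolving the PTE's own virtual address with MiAddressToPte).
// MiFreeContiguousMemory later asserts that PteAddress matches before it
// tears the allocation down.
//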

PVOID
NTAPI
MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
                           IN PFN_NUMBER LowestAcceptablePfn,
                           IN PFN_NUMBER HighestAcceptablePfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MEMORY_CACHING_TYPE CacheType)
{
    PVOID BaseAddress;
    PFN_NUMBER SizeInPages;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Verify count and cache type
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(CacheType <= MmWriteCombined);

    //
    // Compute size requested
    //
    SizeInPages = BYTES_TO_PAGES(NumberOfBytes);

    //
    // Convert the cache attribute and check for cached requests
    //
    CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    if (CacheAttribute == MiCached)
    {
        //
        // Because initial nonpaged pool is supposed to be contiguous, go ahead
        // and try making a nonpaged pool allocation first.
        //
        BaseAddress = ExAllocatePoolWithTag(NonPagedPoolCacheAligned,
                                            NumberOfBytes,
                                            'mCmM');
        if (BaseAddress)
        {
            //
            // Now make sure it's actually contiguous (if it came from expansion
            // it might not be).
            //
            if (MiCheckForContiguousMemory(BaseAddress,
                                           SizeInPages,
                                           SizeInPages,
                                           LowestAcceptablePfn,
                                           HighestAcceptablePfn,
                                           BoundaryPfn,
                                           CacheAttribute))
            {
                //
                // Sweet, we're in business!
                //
                return BaseAddress;
            }

            //
            // No such luck
            //
            ExFreePoolWithTag(BaseAddress, 'mCmM');
        }
    }

    //
    // According to MSDN, the system won't try anything else if you're higher
    // than APC level.
    //
    if (KeGetCurrentIrql() > APC_LEVEL) return NULL;

    //
    // Otherwise, we'll go try to find some
    //
    BaseAddress = MiFindContiguousMemory(LowestAcceptablePfn,
                                         HighestAcceptablePfn,
                                         BoundaryPfn,
                                         SizeInPages,
                                         CacheType);
    if (!BaseAddress)
    {
        DPRINT1("Unable to allocate contiguous memory for %Iu bytes (%Iu pages), out of memory!\n",
                NumberOfBytes, SizeInPages);
    }
    return BaseAddress;
}
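
//
// Taken together, the allocator above is two-tiered: for cacheable requests it
// first tries a plain nonpaged pool allocation and keeps it only if its pages
// happen to satisfy the caller's physical constraints (initial nonpaged pool
// is supposed to be physically contiguous, so this can succeed cheaply), and
// otherwise falls back to carving pages straight out of the free list. The
// fallback is only legal at IRQL <= APC_LEVEL since MiFindContiguousMemory is
// pageable code.
//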

VOID
NTAPI
MiFreeContiguousMemory(IN PVOID BaseAddress)
{
    KIRQL OldIrql;
    PFN_NUMBER PageFrameIndex, LastPage, PageCount;
    PMMPFN Pfn1, StartPfn;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // First, check if the memory came from initial nonpaged pool, or expansion
    //
    if (((BaseAddress >= MmNonPagedPoolStart) &&
         (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                MmSizeOfNonPagedPoolInBytes))) ||
        ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
         (BaseAddress < MmNonPagedPoolEnd)))
    {
        //
        // It did, so just use the pool to free this
        //
        ExFreePoolWithTag(BaseAddress, 'mCmM');
        return;
    }

    /* Get the PTE and frame number for the allocation */
    PointerPte = MiAddressToPte(BaseAddress);
    PageFrameIndex = PFN_FROM_PTE(PointerPte);

    //
    // Now get the PFN entry for this, and make sure it's the correct one
    //
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    if ((!Pfn1) || (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // This probably means you freed an address that wasn't the start of
        // an allocation
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     0x60,
                     (ULONG_PTR)BaseAddress,
                     0,
                     0);
    }

    //
    // Now this PFN isn't the start of any allocation anymore, it's going out
    //
    StartPfn = Pfn1;
    Pfn1->u3.e1.StartOfAllocation = 0;

    /* Loop the PFNs until we find the one that marks the end of the allocation */
    do
    {
        /* Make sure these are the pages we set up in the allocation routine */
        ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(Pfn1->PteAddress == PointerPte);
        ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
        ASSERT(Pfn1->u4.VerifierAllocation == 0);
        ASSERT(Pfn1->u3.e1.PrototypePte == 0);

        /* Set the special pending delete marker */
        MI_SET_PFN_DELETED(Pfn1);

        /* Keep going for assertions */
        PointerPte++;
    } while (Pfn1++->u3.e1.EndOfAllocation == 0);

    //
    // Found it, unmark it
    //
    Pfn1--;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Now compute how many pages this represents
    //
    PageCount = (ULONG)(Pfn1 - StartPfn + 1);

    //
    // So we can know how much to unmap (recall we piggyback on I/O mappings)
    //
    MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);

    //
    // Lock the PFN database
    //
    OldIrql = MiAcquirePfnLock();

    //
    // Loop all the pages
    //
    LastPage = PageFrameIndex + PageCount;
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    do
    {
        /* Decrement the share count and move on */
        MiDecrementShareCount(Pfn1++, PageFrameIndex++);
    } while (PageFrameIndex < LastPage);

    //
    // Release the PFN lock
    //
    MiReleasePfnLock(OldIrql);
}
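
//
// Illustration of the walk above: for a 3-page allocation at PFNs N..N+2, the
// allocator set StartOfAllocation on N and EndOfAllocation on N+2. The free
// path clears the start bit, walks forward asserting each page still looks
// like ours, stops after the page whose EndOfAllocation bit is set, clears
// that bit, and derives PageCount = 3 from the distance walked. No size
// parameter is needed, which is why MmFreeContiguousMemory takes only the
// base address.
//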

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes,
                                       IN PHYSICAL_ADDRESS LowestAcceptableAddress OPTIONAL,
                                       IN PHYSICAL_ADDRESS HighestAcceptableAddress,
                                       IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL,
                                       IN MEMORY_CACHING_TYPE CacheType OPTIONAL)
{
    PFN_NUMBER LowestPfn, HighestPfn, BoundaryPfn;

    //
    // Verify count and cache type
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(CacheType <= MmWriteCombined);

    //
    // Convert the lowest address into a PFN, rounding up
    //
    LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) LowestPfn++;

    //
    // Convert and validate the boundary address into a PFN
    //
    if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) return NULL;
    BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);

    //
    // Convert the highest address into a PFN
    //
    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;

    //
    // Validate the PFN bounds
    //
    if (LowestPfn > HighestPfn) return NULL;

    //
    // Let the contiguous memory allocator handle it
    //
    return MiAllocateContiguousMemory(NumberOfBytes,
                                      LowestPfn,
                                      HighestPfn,
                                      BoundaryPfn,
                                      CacheType);
}
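
//
// A minimal caller-side sketch (the size, range and boundary below are
// illustrative, not values this file prescribes): a driver needing a 16KB
// physically contiguous buffer below 16MB that must not cross a 64KB boundary
// could do:
//
//     PHYSICAL_ADDRESS Low, High, Boundary;
//     PVOID Buffer;
//
//     Low.QuadPart = 0;
//     High.QuadPart = 0x00FFFFFF;  // highest acceptable byte (16MB - 1)
//     Boundary.QuadPart = 0x10000; // must be a page multiple, or the call fails
//     Buffer = MmAllocateContiguousMemorySpecifyCache(16 * 1024,
//                                                     Low,
//                                                     High,
//                                                     Boundary,
//                                                     MmCached);
//     if (Buffer)
//     {
//         /* ... use the buffer ... */
//         MmFreeContiguousMemorySpecifyCache(Buffer, 16 * 1024, MmCached);
//     }
//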

/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
                           IN PHYSICAL_ADDRESS HighestAcceptableAddress)
{
    PFN_NUMBER HighestPfn;

    //
    // Verify byte count
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Convert and normalize the highest address into a PFN
    //
    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;

    //
    // Let the contiguous memory allocator handle it
    //
    return MiAllocateContiguousMemory(NumberOfBytes, 0, HighestPfn, 0, MmCached);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreeContiguousMemory(IN PVOID BaseAddress)
{
    //
    // Let the contiguous memory allocator handle it
    //
    MiFreeContiguousMemory(BaseAddress);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreeContiguousMemorySpecifyCache(IN PVOID BaseAddress,
                                   IN SIZE_T NumberOfBytes,
                                   IN MEMORY_CACHING_TYPE CacheType)
{
    //
    // Just call the non-cached version (there are no cache issues when freeing)
    //
    MiFreeContiguousMemory(BaseAddress);
}

/* EOF */