/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::POOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
KGUARDED_MUTEX MmPagedPoolMutex;
MM_PAGED_POOL_INFO MmPagedPoolInfo;
SIZE_T MmAllocatedNonPagedPool;
ULONG MmSpecialPoolTag;

/* PRIVATE FUNCTIONS **********************************************************/

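//
// MiInitializeArmPool: carves the initial nonpaged pool into a single free
// run, records the PFN bounds of the initial pool, and initializes the
// system PTE space used for nonpaged pool expansion.
//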
VOID
NTAPI
MiInitializeArmPool(VOID)
{
    ULONG i;
    PFN_NUMBER PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
    }

    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge =
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}

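//
// MiAllocatePoolPages: allocates enough contiguous pages to hold SizeInBytes
// bytes from either paged or nonpaged pool and returns the base virtual
// address of the allocation, or NULL if the pool is exhausted.
//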
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER SizeInPages, PageFrameNumber;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    MMPTE TempPte;
    PMMPFN Pfn1;
    PVOID BaseVa;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = BYTES_TO_PAGES(SizeInBytes);

    //
    // Handle paged pool
    //
    if (PoolType == PagedPool)
    {
        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Out of memory!
            //
            DPRINT1("OUT OF PAGED POOL!!!\n");
            KeReleaseGuardedMutex(&MmPagedPoolMutex);
            return NULL;
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i + SizeInPages - 1);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        //
        // Setup a demand-zero writable PTE
        //
        TempPte.u.Long = 0;
        MI_MAKE_WRITE_PAGE(&TempPte);

        //
        // Get the first PTE and the PTE just past the end of the allocation,
        // then loop through all of them
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            *PointerPte++ = TempPte;
        } while (PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

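    //
    // Nonpaged pool: try to satisfy the request from the existing free-page
    // lists first; if none of them holds a large enough run, fall back to
    // expanding the pool with system PTEs and freshly allocated pages.
    //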
    //
    // Pick the free list to start searching from, based on the allocation
    // size; requests of 4 pages or more start at the last list
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation begins at the end of what remains of this
                // free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                //
                // This is not a free page segment anymore
                //
                RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    //
                    // Insert it back into a different list, based on its pages
                    //
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
                    InsertTailList(&MmNonPagedPoolFreeListHead[i],
                                   &FreeEntry->List);
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for the first page of the allocation
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

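    //
    // The reserved PTEs are not yet backed by physical pages; commit them
    // below while holding both the nonpaged pool lock and the PFN database
    // lock, since page allocation touches the PFN database.
    //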
    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = HyperTemplatePte;
    do
    {
        //
        // Allocate a page
        //
        PageFrameNumber = MmAllocPage(MC_NPPOOL, 0);

        //
        // Get the PFN entry for it
        //
        Pfn1 = MiGetPfnEntry(PageFrameNumber);

        //
        // Write the PTE for it
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        ASSERT(PointerPte->u.Hard.Valid == 0);
        ASSERT(TempPte.u.Hard.Valid == 1);
        *PointerPte++ = TempPte;
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}

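//
// MiFreePoolPages: releases the pool pages backing the allocation that starts
// at StartingVa and returns the number of pages it occupied. Paged pool is
// handled through the allocation bitmaps; nonpaged pool through the PFN flags
// and the free-page lists.
//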
ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_NUMBER FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans
        //
        NumberOfPages = End - i + 1;

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the end-of-allocation bit, then the allocation bitmap bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }

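    //
    // Nonpaged pool: find the allocation's extent by walking the PFN
    // StartOfAllocation/EndOfAllocation flags, then coalesce the freed run
    // with any free runs immediately after and before it.
    //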
    //
    // Get the first PTE and its corresponding PFN entry
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = PointerPte - StartPte + 1;

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PFN entries as not being part of an allocation
    // anymore
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, our entire allocation must've fit within the initial
        // nonpaged pool, or the expansion nonpaged pool, so get the PFN entry
        // of the next allocation
        //
        ASSERT(((ULONG_PTR)StartingVa + (NumberOfPages << PAGE_SHIFT)) <=
               (ULONG_PTR)MmNonPagedPoolEnd);
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }
    }

    //
    // If the page after our allocation is not the start of another allocation,
    // then it belongs to a free run that we can merge with
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // Locate the free entry descriptor for that run
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Owner == FreeEntry);

        //
        // Consume this entry's pages, and remove it from its free list
        //
        FreePages += FreeEntry->Size;
        RemoveEntryList(&FreeEntry->List);
    }

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation
    // and then check if this page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        FreeEntry = FreeEntry->Owner;

        //
        // Check if the entry is small enough to be indexed on a free list
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
        {
            //
            // Remove the list from where it is now
            //
            RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = (ULONG)(FreeEntry->Size - 1);
            if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

            //
            // Do it
            //
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }

    //
    // Check if we were unable to merge with the entry before us, in which case
    // our own pages become the free entry
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages, at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = FreeEntry->Size - 1;
        if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

        //
        // And insert us
        //
        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Get all the pages between our allocation and its end. These will all now
    // become free page chunks.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}


BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    UNIMPLEMENTED;
}

/* EOF */