[NTOS] Inline writing to PTEs through MI_WRITE_VALID/INVALID_PTE.
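The MI_WRITE_VALID_PTE / MI_WRITE_INVALID_PTE helpers named in the title are defined in miarm.h (included by this file), not here; in this file they are used to stamp a fully formed valid PTE over an entry that is currently invalid. As a rough illustration of that validate-then-assign pattern, here is a standalone C sketch with a simplified PTE layout. The MODEL_PTE type, its bitfields, and the Model* function names are illustrative assumptions for this note, not the actual miarm.h definitions.

/* Simplified, standalone model of the inline PTE-write pattern.
 * The real MMPTE layout and MI_WRITE_VALID/INVALID_PTE live in
 * ntoskrnl/mm/ARM3/miarm.h; everything below is illustrative only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef union _MODEL_PTE
{
    struct
    {
        uint32_t Valid : 1;             /* hardware "present" bit */
        uint32_t Write : 1;
        uint32_t Reserved : 10;
        uint32_t PageFrameNumber : 20;  /* physical page index */
    } Hard;
    uint32_t Long;
} MODEL_PTE;

/* Write a valid PTE over an entry that must currently be invalid */
static void ModelWriteValidPte(MODEL_PTE *PointerPte, MODEL_PTE TempPte)
{
    assert(PointerPte->Hard.Valid == 0);  /* target must not be mapped yet */
    assert(TempPte.Hard.Valid == 1);      /* source must describe a valid mapping */
    *PointerPte = TempPte;                /* single aligned store */
}

/* Replace a valid PTE with an invalid (software) one */
static void ModelWriteInvalidPte(MODEL_PTE *PointerPte, MODEL_PTE InvalidPte)
{
    assert(InvalidPte.Hard.Valid == 0);
    *PointerPte = InvalidPte;
}

int main(void)
{
    MODEL_PTE PageTable[4] = { { .Long = 0 } };
    MODEL_PTE TempPte = { .Long = 0 };

    /* Build a valid, writable PTE pointing at an arbitrary page frame */
    TempPte.Hard.Valid = 1;
    TempPte.Hard.Write = 1;
    TempPte.Hard.PageFrameNumber = 0x1234;

    ModelWriteValidPte(&PageTable[0], TempPte);
    printf("PTE 0 now maps PFN 0x%x\n", (unsigned)PageTable[0].Hard.PageFrameNumber);

    /* Tear the mapping down again */
    TempPte.Hard.Valid = 0;
    ModelWriteInvalidPte(&PageTable[0], TempPte);
    return 0;
}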
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/pool.c
5 * PURPOSE: ARM Memory Manager Pool Allocator
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::POOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
22 PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
23 PVOID MmNonPagedPoolEnd0;
24 PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
25 KGUARDED_MUTEX MmPagedPoolMutex;
26 MM_PAGED_POOL_INFO MmPagedPoolInfo;
27 SIZE_T MmAllocatedNonPagedPool;
28 ULONG MmSpecialPoolTag;
29 ULONG MmConsumedPoolPercentage;
30 BOOLEAN MmProtectFreedNonPagedPool;
31
32 /* PRIVATE FUNCTIONS **********************************************************/
33
34 VOID
35 NTAPI
36 MiInitializeNonPagedPoolThresholds(VOID)
37 {
38 PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;
39
40 /* Default low threshold of 8MB or one third of nonpaged pool */
41 MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
42 MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);
43
44 /* Default high threshold of 20MB or 50% */
45 MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
46 MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
47 ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
48 }
49
50 VOID
51 NTAPI
52 MiInitializePoolEvents(VOID)
53 {
54 KIRQL OldIrql;
55 PFN_NUMBER FreePoolInPages;
56
57 /* Lock paged pool */
58 KeAcquireGuardedMutex(&MmPagedPoolMutex);
59
60 /* The total size of the paged pool minus the allocated size is free */
61 FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
62
63 /* Check the initial high state */
64 if (FreePoolInPages >= MiHighPagedPoolThreshold)
65 {
66 /* We have plenty of pool */
67 KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
68 }
69 else
70 {
71 /* We don't */
72 KeClearEvent(MiHighPagedPoolEvent);
73 }
74
75 /* Check the initial low state */
76 if (FreePoolInPages <= MiLowPagedPoolThreshold)
77 {
78 /* We're very low in free pool memory */
79 KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
80 }
81 else
82 {
83 /* We're not */
84 KeClearEvent(MiLowPagedPoolEvent);
85 }
86
87 /* Release the paged pool lock */
88 KeReleaseGuardedMutex(&MmPagedPoolMutex);
89
90 /* Now it's time for the nonpaged pool lock */
91 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
92
93 /* Free pages are the maximum minus what's been allocated */
94 FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
95
96 /* Check if we have plenty */
97 if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
98 {
99 /* We do, set the event */
100 KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
101 }
102 else
103 {
104 /* We don't, clear the event */
105 KeClearEvent(MiHighNonPagedPoolEvent);
106 }
107
108 /* Check if we have very little */
109 if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
110 {
111 /* We do, set the event */
112 KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
113 }
114 else
115 {
116 /* We don't, clear it */
117 KeClearEvent(MiLowNonPagedPoolEvent);
118 }
119
120 /* We're done, release the nonpaged pool lock */
121 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
122 }
123
124 VOID
125 NTAPI
126 MiInitializeNonPagedPool(VOID)
127 {
128 ULONG i;
129 PFN_NUMBER PoolPages;
130 PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
131 PMMPTE PointerPte;
132 PAGED_CODE();
133
134 //
135 // We keep 4 lists of free pages (4 lists help avoid contention)
136 //
137 for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
138 {
139 //
140 // Initialize each of them
141 //
142 InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
143 }
144
145 //
146 // Calculate how many pages the initial nonpaged pool has
147 //
148 PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
149 MmNumberOfFreeNonPagedPool = PoolPages;
150
151 //
152 // Initialize the first free entry
153 //
154 FreeEntry = MmNonPagedPoolStart;
155 FirstEntry = FreeEntry;
156 FreeEntry->Size = PoolPages;
157 FreeEntry->Owner = FirstEntry;
158
159 //
160 // Insert it into the last list
161 //
162 InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
163 &FreeEntry->List);
164
165 //
166 // Now create free entries for every single other page
167 //
168 while (PoolPages-- > 1)
169 {
170 //
171 // Link them all back to the original entry
172 //
173 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
174 FreeEntry->Owner = FirstEntry;
175 }
176
177 //
178 // Validate and remember first allocated pool page
179 //
180 PointerPte = MiAddressToPte(MmNonPagedPoolStart);
181 ASSERT(PointerPte->u.Hard.Valid == 1);
182 MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
183
184 //
185 // Keep track of where initial nonpaged pool ends
186 //
187 MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
188 MmSizeOfNonPagedPoolInBytes);
189
190 //
191 // Validate and remember last allocated pool page
192 //
193 PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
194 ASSERT(PointerPte->u.Hard.Valid == 1);
195 MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
196
197 //
198 // Validate the first nonpaged pool expansion page (which is a guard page)
199 //
200 PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
201 ASSERT(PointerPte->u.Hard.Valid == 0);
202
203 //
204 // Calculate the size of the expansion region alone
205 //
206 MiExpansionPoolPagesInitialCharge =
207 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);
208
209 //
210 // Remove 2 pages, since there's a guard page on top and on the bottom
211 //
212 MiExpansionPoolPagesInitialCharge -= 2;
213
214 //
215 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
216 // guard page on top so make sure to skip it. The bottom guard page will be
217 // guaranteed by the fact our size is off by one.
218 //
219 MiInitializeSystemPtes(PointerPte + 1,
220 MiExpansionPoolPagesInitialCharge,
221 NonPagedPoolExpansion);
222 }
223
224 PVOID
225 NTAPI
226 MiAllocatePoolPages(IN POOL_TYPE PoolType,
227 IN SIZE_T SizeInBytes)
228 {
229 PFN_NUMBER SizeInPages, PageFrameNumber;
230 ULONG i;
231 KIRQL OldIrql;
232 PLIST_ENTRY NextEntry, NextHead, LastHead;
233 PMMPTE PointerPte, StartPte;
234 MMPTE TempPte;
235 PMMPFN Pfn1;
236 PVOID BaseVa, BaseVaStart;
237 PMMFREE_POOL_ENTRY FreeEntry;
238 PKSPIN_LOCK_QUEUE LockQueue;
239
240 //
241 // Figure out how big the allocation is in pages
242 //
243 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
244
245 //
246 // Handle paged pool
247 //
248 if (PoolType == PagedPool)
249 {
250 //
251 // Lock the paged pool mutex
252 //
253 KeAcquireGuardedMutex(&MmPagedPoolMutex);
254
255 //
256 // Find some empty allocation space
257 //
258 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
259 SizeInPages,
260 MmPagedPoolInfo.PagedPoolHint);
261 if (i == 0xFFFFFFFF)
262 {
263 //
264 // Get the page bit count
265 //
266 i = ((SizeInPages - 1) / 1024) + 1;
267 DPRINT1("Paged pool expansion: %d %x\n", i, SizeInPages);
268
269 //
270 // Check if there is enough paged pool expansion space left
271 //
272 if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
273 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
274 {
275 //
276 // Out of memory!
277 //
278 DPRINT1("OUT OF PAGED POOL!!!\n");
279 KeReleaseGuardedMutex(&MmPagedPoolMutex);
280 return NULL;
281 }
282
283 //
284 // Check if we'll have to expand past the last PTE we have available
285 //
286 if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
287 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
288 {
289 //
290 // We can only support this much then
291 //
292 SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
293 MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
294 1;
295 ASSERT(SizeInPages < i);
296 i = SizeInPages;
297 }
298 else
299 {
300 //
301 // Otherwise, there is plenty of space left for this expansion
302 //
303 SizeInPages = i;
304 }
305
306 //
307 // Get the template PTE we'll use to expand
308 //
309 TempPte = ValidKernelPte;
310
311 //
312 // Get the first PTE in expansion space
313 //
314 PointerPte = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
315 BaseVa = MiPteToAddress(PointerPte);
316 BaseVaStart = BaseVa;
317
318 //
319 // Lock the PFN database and loop pages
320 //
321 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
322 do
323 {
324 //
325 // It should not already be valid
326 //
327 ASSERT(PointerPte->u.Hard.Valid == 0);
328
329 /* Request a page */
330 PageFrameNumber = MiRemoveAnyPage(0);
331 TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
332
333 //
334 // Save it into our double-buffered system page directory
335 //
336 /* This seems to be making the assumption that one PDE is one page long */
337 C_ASSERT(PAGE_SIZE == (PD_COUNT * (sizeof(MMPTE) * PDE_COUNT)));
338 MmSystemPagePtes[((ULONG_PTR)PointerPte & (PAGE_SIZE - 1)) /
339 sizeof(MMPTE)] = TempPte;
340
341 /* Initialize the PFN */
342 MiInitializePfnForOtherProcess(PageFrameNumber,
343 PointerPte,
344 MmSystemPageDirectory[(PointerPte - (PMMPTE)PDE_BASE) / PDE_COUNT]);
345
346 /* Write the actual PTE now */
347 MI_WRITE_VALID_PTE(PointerPte++, TempPte);
348
349 //
350 // Move on to the next expansion address
351 //
352 BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
353 } while (--i > 0);
354
355 //
356 // Release the PFN database lock
357 //
358 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
359
360 //
361 // These pages are now available, clear their availability bits
362 //
363 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
364 (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
365 MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
366 1024,
367 SizeInPages * 1024);
368
369 //
370 // Update the next expansion location
371 //
372 MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;
373
374 //
375 // Zero out the newly available memory
376 //
377 RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);
378
379 //
380 // Now try consuming the pages again
381 //
382 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
383 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
384 SizeInPages,
385 0);
386 if (i == 0xFFFFFFFF)
387 {
388 //
389 // Out of memory!
390 //
391 DPRINT1("OUT OF PAGED POOL!!!\n");
392 KeReleaseGuardedMutex(&MmPagedPoolMutex);
393 return NULL;
394 }
395 }
396
397 //
398 // Update the pool hint if the request was just one page
399 //
400 if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
401
402 //
403 // Update the end bitmap so we know the bounds of this allocation when
404 // the time comes to free it
405 //
406 RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i + SizeInPages - 1);
407
408 //
409 // Now we can release the lock (it mainly protects the bitmap)
410 //
411 KeReleaseGuardedMutex(&MmPagedPoolMutex);
412
413 //
414 // Now figure out where this allocation starts
415 //
416 BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
417
418 //
419 // Flush the TLB
420 //
421 KeFlushEntireTb(TRUE, TRUE);
422
423 /* Setup a demand-zero writable PTE */
424 MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
425
426 //
427 // Find the first and last PTE, then loop them all
428 //
429 PointerPte = MiAddressToPte(BaseVa);
430 StartPte = PointerPte + SizeInPages;
431 do
432 {
433 //
434 // Write the demand zero PTE and keep going
435 //
436 ASSERT(PointerPte->u.Hard.Valid == 0);
437 *PointerPte++ = TempPte;
438 } while (PointerPte < StartPte);
439
440 //
441 // Return the allocation address to the caller
442 //
443 return BaseVa;
444 }
445
446 //
447 // Allocations of less than 4 pages go into their individual buckets
448 //
449 i = SizeInPages - 1;
450 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
451
452 //
453 // Loop through all the free page lists based on the page index
454 //
455 NextHead = &MmNonPagedPoolFreeListHead[i];
456 LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
457
458 //
459 // Acquire the nonpaged pool lock
460 //
461 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
462 do
463 {
464 //
465 // Now loop through all the free page entries in this given list
466 //
467 NextEntry = NextHead->Flink;
468 while (NextEntry != NextHead)
469 {
470 //
471 // Grab the entry and see if it can handle our allocation
472 //
473 FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
474 if (FreeEntry->Size >= SizeInPages)
475 {
476 //
477 // It does, so consume the pages from here
478 //
479 FreeEntry->Size -= SizeInPages;
480
481 //
482 // The allocation will begin in this free page area
483 //
484 BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
485 (FreeEntry->Size << PAGE_SHIFT));
486
487 //
488 // This is not a free page segment anymore
489 //
490 RemoveEntryList(&FreeEntry->List);
491
492 //
493 // However, check if it's still got space left
494 //
495 if (FreeEntry->Size != 0)
496 {
497 //
498 // Insert it back into a different list, based on its pages
499 //
500 i = FreeEntry->Size - 1;
501 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
502 InsertTailList (&MmNonPagedPoolFreeListHead[i],
503 &FreeEntry->List);
504 }
505
506 //
507 // Grab the PTE for this allocation
508 //
509 PointerPte = MiAddressToPte(BaseVa);
510 ASSERT(PointerPte->u.Hard.Valid == 1);
511
512 //
513 // Grab the PFN entry for this page
514 //
515 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
516
517 //
518 // Now mark it as the beginning of an allocation
519 //
520 ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
521 Pfn1->u3.e1.StartOfAllocation = 1;
522
523 //
524 // Check if the allocation is larger than one page
525 //
526 if (SizeInPages != 1)
527 {
528 //
529 // Navigate to the last PFN entry and PTE
530 //
531 PointerPte += SizeInPages - 1;
532 ASSERT(PointerPte->u.Hard.Valid == 1);
533 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
534 }
535
536 //
537 // Mark this PFN as the last (might be the same as the first)
538 //
539 ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
540 Pfn1->u3.e1.EndOfAllocation = 1;
541
542 //
543 // Release the nonpaged pool lock, and return the allocation
544 //
545 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
546 return BaseVa;
547 }
548
549 //
550 // Try the next free page entry
551 //
552 NextEntry = FreeEntry->List.Flink;
553 }
554 } while (++NextHead < LastHead);
555
556 //
557 // If we got here, we're out of space.
558 // Start by releasing the lock
559 //
560 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
561
562 //
563 // Allocate some system PTEs
564 //
565 StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
566 PointerPte = StartPte;
567 if (StartPte == NULL)
568 {
569 //
570 // Ran out of memory
571 //
572 DPRINT1("Out of NP Expansion Pool\n");
573 return NULL;
574 }
575
576 //
577 // Acquire the pool lock now
578 //
579 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
580
581 //
582 // Lock the PFN database too
583 //
584 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
585 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
586
587 //
588 // Loop the pages
589 //
590 TempPte = ValidKernelPte;
591 do
592 {
593 /* Allocate a page */
594 PageFrameNumber = MiRemoveAnyPage(0);
595
596 /* Get the PFN entry for it and fill it out */
597 Pfn1 = MiGetPfnEntry(PageFrameNumber);
598 Pfn1->u3.e2.ReferenceCount = 1;
599 Pfn1->u2.ShareCount = 1;
600 Pfn1->PteAddress = PointerPte;
601 Pfn1->u3.e1.PageLocation = ActiveAndValid;
602 Pfn1->u4.VerifierAllocation = 0;
603
604 /* Write the PTE for it */
605 TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
606 MI_WRITE_VALID_PTE(PointerPte++, TempPte);
607 } while (--SizeInPages > 0);
608
609 //
610 // This is the last page
611 //
612 Pfn1->u3.e1.EndOfAllocation = 1;
613
614 //
615 // Get the first page and mark it as such
616 //
617 Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
618 Pfn1->u3.e1.StartOfAllocation = 1;
619
620 //
621 // Release the PFN and nonpaged pool lock
622 //
623 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
624 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
625
626 //
627 // Return the address
628 //
629 return MiPteToAddress(StartPte);
630 }
631
632 ULONG
633 NTAPI
634 MiFreePoolPages(IN PVOID StartingVa)
635 {
636 PMMPTE PointerPte, StartPte;
637 PMMPFN Pfn1, StartPfn;
638 PFN_NUMBER FreePages, NumberOfPages;
639 KIRQL OldIrql;
640 PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
641 ULONG i, End;
642
643 //
644 // Handle paged pool
645 //
646 if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
647 {
648 //
649 // Calculate the offset from the beginning of paged pool, and convert it
650 // into pages
651 //
652 i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
653 End = i;
654
655 //
656 // Now use the end bitmap to scan until we find a set bit, meaning that
657 // this allocation finishes here
658 //
659 while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
660
661 //
662 // Now calculate the total number of pages this allocation spans
663 //
664 NumberOfPages = End - i + 1;
665
666 /* Delete the actual pages */
667 PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
668 FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
669 ASSERT(FreePages == NumberOfPages);
670
671 //
672 // Acquire the paged pool lock
673 //
674 KeAcquireGuardedMutex(&MmPagedPoolMutex);
675
676 //
677 // Clear the allocation and free bits
678 //
679 RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i);
680 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
681
682 //
683 // Update the hint if we need to
684 //
685 if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
686
687 //
688 // Release the lock protecting the bitmaps
689 //
690 KeReleaseGuardedMutex(&MmPagedPoolMutex);
691
692 //
693 // And finally return the number of pages freed
694 //
695 return NumberOfPages;
696 }
697
698 //
699 // Get the first PTE and its corresponding PFN entry
700 //
701 StartPte = PointerPte = MiAddressToPte(StartingVa);
702 StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
703
704 //
705 // Loop until we find the last PTE
706 //
707 while (Pfn1->u3.e1.EndOfAllocation == 0)
708 {
709 //
710 // Keep going
711 //
712 PointerPte++;
713 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
714 }
715
716 //
717 // Now we know how many pages we have
718 //
719 NumberOfPages = PointerPte - StartPte + 1;
720
721 //
722 // Acquire the nonpaged pool lock
723 //
724 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
725
726 //
727 // Mark the first and last PFN entries as no longer part of an allocation
728 //
729 StartPfn->u3.e1.StartOfAllocation = 0;
730 Pfn1->u3.e1.EndOfAllocation = 0;
731
732 //
733 // Assume we will free as many pages as the allocation was
734 //
735 FreePages = NumberOfPages;
736
737 //
738 // Peek one page past the end of the allocation
739 //
740 PointerPte++;
741
742 //
743 // Guard against going past initial nonpaged pool
744 //
745 if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
746 {
747 //
748 // This page is on the outskirts of initial nonpaged pool, so ignore it
749 //
750 Pfn1 = NULL;
751 }
752 else
753 {
754 //
755 // Otherwise, our entire allocation must've fit within the initial non
756 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
757 // the next allocation
758 //
759 ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
760 if (PointerPte->u.Hard.Valid == 1)
761 {
762 //
763 // It's either expansion or initial: get the PFN entry
764 //
765 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
766 }
767 else
768 {
769 //
770 // This means we've reached the guard page that protects the end of
771 // the expansion nonpaged pool
772 //
773 Pfn1 = NULL;
774 }
775
776 }
777
778 //
779 // Check if the page right after us is free pool rather than another allocation
780 //
781 if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
782 {
783 //
784 // It is free pool, so locate the free entry descriptor that owns it
785 //
786 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
787 (NumberOfPages << PAGE_SHIFT));
788 ASSERT(FreeEntry->Owner == FreeEntry);
789
790 //
791 // Consume this entry's pages, and remove it from its free list
792 //
793 FreePages += FreeEntry->Size;
794 RemoveEntryList (&FreeEntry->List);
795 }
796
797 //
798 // Now get the official free entry we'll create for the caller's allocation
799 //
800 FreeEntry = StartingVa;
801
802 //
803 // Check if our allocation is the very first page
804 //
805 if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
806 {
807 //
808 // Then we can't do anything or we'll risk underflowing
809 //
810 Pfn1 = NULL;
811 }
812 else
813 {
814 //
815 // Otherwise, get the PTE for the page right before our allocation
816 //
817 PointerPte -= NumberOfPages + 1;
818 if (PointerPte->u.Hard.Valid == 1)
819 {
820 //
821 // It's either expansion or initial nonpaged pool, get the PFN entry
822 //
823 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
824 }
825 else
826 {
827 //
828 // We must've reached the guard page, so don't risk touching it
829 //
830 Pfn1 = NULL;
831 }
832 }
833
834 //
835 // Check if there is a valid PFN entry for the page before the allocation
836 // and then check if this page was actually the end of an allocation.
837 // If it wasn't, then we know for sure it's a free page
838 //
839 if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
840 {
841 //
842 // Get the free entry descriptor for that given page range
843 //
844 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
845 FreeEntry = FreeEntry->Owner;
846
847 //
848 // Check if the entry is small enough to be indexed on a free list
849 // If it is, we'll want to re-insert it, since we're about to
850 // collapse our pages on top of it, which will change its count
851 //
852 if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
853 {
854 //
855 // Remove the list from where it is now
856 //
857 RemoveEntryList(&FreeEntry->List);
858
859 //
860 // Update its size
861 //
862 FreeEntry->Size += FreePages;
863
864 //
865 // And now find the new appropriate list to place it in
866 //
867 i = (ULONG)(FreeEntry->Size - 1);
868 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
869
870 //
871 // Do it
872 //
873 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
874 }
875 else
876 {
877 //
878 // Otherwise, just combine our free pages into this entry
879 //
880 FreeEntry->Size += FreePages;
881 }
882 }
883
884 //
885 // Check if we could not merge into a preceding free entry
886 //
887 if (FreeEntry == StartingVa)
888 {
889 //
890 // Well, now we are a free entry. At worst we just have our newly freed
891 // pages, at best we have our pages plus whatever entry came after us
892 //
893 FreeEntry->Size = FreePages;
894
895 //
896 // Find the appropriate list we should be on
897 //
898 i = FreeEntry->Size - 1;
899 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
900
901 //
902 // And insert us
903 //
904 InsertTailList (&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
905 }
906
907 //
908 // Just a sanity check
909 //
910 ASSERT(FreePages != 0);
911
912 //
913 // Get all the pages between our allocation and its end. These will all now
914 // become free page chunks.
915 //
916 NextEntry = StartingVa;
917 LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
918 do
919 {
920 //
921 // Link back to the parent free entry, and keep going
922 //
923 NextEntry->Owner = FreeEntry;
924 NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
925 } while (NextEntry != LastEntry);
926
927 //
928 // We're done, release the lock and let the caller know how much we freed
929 //
930 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
931 return NumberOfPages;
932 }
933
934
935 BOOLEAN
936 NTAPI
937 MiRaisePoolQuota(IN POOL_TYPE PoolType,
938 IN ULONG CurrentMaxQuota,
939 OUT PULONG NewMaxQuota)
940 {
941 //
942 // Not implemented
943 //
944 UNIMPLEMENTED;
945 *NewMaxQuota = CurrentMaxQuota + 65536;
946 return TRUE;
947 }
948
949 /* PUBLIC FUNCTIONS ***********************************************************/
950
951 /*
952 * @unimplemented
953 */
954 PVOID
955 NTAPI
956 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
957 IN ULONG PoolTag)
958 {
959 UNIMPLEMENTED;
960 return NULL;
961 }
962
963 /*
964 * @unimplemented
965 */
966 VOID
967 NTAPI
968 MmFreeMappingAddress(IN PVOID BaseAddress,
969 IN ULONG PoolTag)
970 {
971 UNIMPLEMENTED;
972 }
973
974 /* EOF */
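As a closing note on how the two page-level entry points above fit together: MiAllocatePoolPages hands out a virtual address backed by whole pages of the requested pool type, and MiFreePoolPages later returns the number of pages it released for that same address. A minimal, hypothetical kernel-mode caller might look like this (the function name MiExamplePageUser and the two-page size are illustrative, not part of this file):

/* Hypothetical caller of the page-level pool interfaces above */
VOID
NTAPI
MiExamplePageUser(VOID)
{
    PVOID Buffer;
    ULONG PagesFreed;

    /* Ask for two whole pages of nonpaged pool */
    Buffer = MiAllocatePoolPages(NonPagedPool, 2 * PAGE_SIZE);
    if (Buffer == NULL) return;

    /* ... use the pages ... */
    RtlZeroMemory(Buffer, 2 * PAGE_SIZE);

    /* Return them; the result is the number of pages actually freed */
    PagesFreed = MiFreePoolPages(Buffer);
    ASSERT(PagesFreed == 2);
}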