[NTOS]: When expanding paged pool, use MiRemoveAnyPage, not MmAllocPage.
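(Presumably because the expansion paths below run with the PFN lock held and fill in the PFN entries themselves, the frame has to come straight off the ARM3 free/zeroed page lists via MiRemoveAnyPage, rather than through the legacy MmAllocPage allocator.)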
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/pool.c
5 * PURPOSE: ARM Memory Manager Pool Allocator
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::POOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
22 PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
23 PVOID MmNonPagedPoolEnd0;
24 PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
25 KGUARDED_MUTEX MmPagedPoolMutex;
26 MM_PAGED_POOL_INFO MmPagedPoolInfo;
27 SIZE_T MmAllocatedNonPagedPool;
28 ULONG MmSpecialPoolTag;
29 ULONG MmConsumedPoolPercentage;
30 BOOLEAN MmProtectFreedNonPagedPool;
31
32 /* PRIVATE FUNCTIONS **********************************************************/
33
34 VOID
35 NTAPI
36 MiInitializeNonPagedPoolThresholds(VOID)
37 {
38 PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;
39
40 /* Default low threshold of 8MB or one third of nonpaged pool */
41 MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
42 MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);
43
44 /* Default high threshold of 20MB or 50% */
45 MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
46 MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
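/* For example, a 12 MB nonpaged pool gets a 4 MB low mark (one third) and a
   6 MB high mark (one half) instead of the 8 MB / 20 MB defaults */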
47 ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
48 }
49
50 VOID
51 NTAPI
52 MiInitializePoolEvents(VOID)
53 {
54 KIRQL OldIrql;
55 PFN_NUMBER FreePoolInPages;
56
57 /* Lock paged pool */
58 KeAcquireGuardedMutex(&MmPagedPoolMutex);
59
60 /* The total size of the paged pool minus the allocated size is free */
61 FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
62
63 /* Check the initial high state */
64 if (FreePoolInPages >= MiHighPagedPoolThreshold)
65 {
66 /* We have plenty of pool */
67 KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
68 }
69 else
70 {
71 /* We don't */
72 KeClearEvent(MiHighPagedPoolEvent);
73 }
74
75 /* Check the initial low state */
76 if (FreePoolInPages <= MiLowPagedPoolThreshold)
77 {
78 /* We're very low in free pool memory */
79 KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
80 }
81 else
82 {
83 /* We're not */
84 KeClearEvent(MiLowPagedPoolEvent);
85 }
86
87 /* Release the paged pool lock */
88 KeReleaseGuardedMutex(&MmPagedPoolMutex);
89
90 /* Now it's time for the nonpaged pool lock */
91 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
92
93 /* Free pages are the maximum minus what's been allocated */
94 FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
95
96 /* Check if we have plenty */
97 if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
98 {
99 /* We do, set the event */
100 KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
101 }
102 else
103 {
104 /* We don't, clear the event */
105 KeClearEvent(MiHighNonPagedPoolEvent);
106 }
107
108 /* Check if we have very little */
109 if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
110 {
111 /* We do, set the event */
112 KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
113 }
114 else
115 {
116 /* We don't, clear it */
117 KeClearEvent(MiLowNonPagedPoolEvent);
118 }
119
120 /* We're done, release the nonpaged pool lock */
121 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
122 }
123
124 VOID
125 NTAPI
126 MiInitializeNonPagedPool(VOID)
127 {
128 ULONG i;
129 PFN_NUMBER PoolPages;
130 PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
131 PMMPTE PointerPte;
132 PAGED_CODE();
133
134 //
135 // We keep 4 lists of free pages (4 lists help avoid contention)
136 //
137 for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
138 {
139 //
140 // Initialize each of them
141 //
142 InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
143 }
144
145 //
146 // Calculate how many pages the initial nonpaged pool has
147 //
148 PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
149 MmNumberOfFreeNonPagedPool = PoolPages;
150
151 //
152 // Initialize the first free entry
153 //
154 FreeEntry = MmNonPagedPoolStart;
155 FirstEntry = FreeEntry;
156 FreeEntry->Size = PoolPages;
157 FreeEntry->Owner = FirstEntry;
158
159 //
160 // Insert it into the last list
161 //
162 InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
163 &FreeEntry->List);
164
165 //
166 // Now mark every remaining page as belonging to this same free entry
167 //
168 while (PoolPages-- > 1)
169 {
170 //
171 // Link them all back to the original entry
172 //
173 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
174 FreeEntry->Owner = FirstEntry;
175 }
176
177 //
178 // Validate and remember first allocated pool page
179 //
180 PointerPte = MiAddressToPte(MmNonPagedPoolStart);
181 ASSERT(PointerPte->u.Hard.Valid == 1);
182 MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
183
184 //
185 // Keep track of where initial nonpaged pool ends
186 //
187 MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
188 MmSizeOfNonPagedPoolInBytes);
189
190 //
191 // Validate and remember last allocated pool page
192 //
193 PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
194 ASSERT(PointerPte->u.Hard.Valid == 1);
195 MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
196
197 //
198 // Validate the first nonpaged pool expansion page (which is a guard page)
199 //
200 PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
201 ASSERT(PointerPte->u.Hard.Valid == 0);
202
203 //
204 // Calculate the size of the expansion region alone
205 //
206 MiExpansionPoolPagesInitialCharge =
207 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);
208
209 //
210 // Remove 2 pages, since there's a guard page on top and on the bottom
211 //
212 MiExpansionPoolPagesInitialCharge -= 2;
213
214 //
215 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
216 // guard page on top so make sure to skip it. The bottom guard page will be
217 // guaranteed by the fact our size is off by one.
218 //
219 MiInitializeSystemPtes(PointerPte + 1,
220 MiExpansionPoolPagesInitialCharge,
221 NonPagedPoolExpansion);
222 }
223
224 PVOID
225 NTAPI
226 MiAllocatePoolPages(IN POOL_TYPE PoolType,
227 IN SIZE_T SizeInBytes)
228 {
229 PFN_NUMBER SizeInPages, PageFrameNumber;
230 ULONG i;
231 KIRQL OldIrql;
232 PLIST_ENTRY NextEntry, NextHead, LastHead;
233 PMMPTE PointerPte, StartPte;
234 MMPTE TempPte;
235 PMMPFN Pfn1;
236 PVOID BaseVa, BaseVaStart;
237 PMMFREE_POOL_ENTRY FreeEntry;
238 PKSPIN_LOCK_QUEUE LockQueue;
239
240 //
241 // Figure out how big the allocation is in pages
242 //
243 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
244
245 //
246 // Handle paged pool
247 //
248 if (PoolType == PagedPool)
249 {
250 //
251 // Lock the paged pool mutex
252 //
253 KeAcquireGuardedMutex(&MmPagedPoolMutex);
254
255 //
256 // Find some empty allocation space
257 //
258 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
259 SizeInPages,
260 MmPagedPoolInfo.PagedPoolHint);
261 if (i == 0xFFFFFFFF)
262 {
263 //
264 // Work out how many page tables (PDEs) this expansion needs; each one maps 1024 paged pool pages
265 //
266 i = ((SizeInPages - 1) / 1024) + 1;
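// (for example, a request for 3000 paged pool pages needs ((3000 - 1) / 1024) + 1 = 3 page tables)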
267
268 //
269 // Check if there is enough paged pool expansion space left
270 //
271 if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
272 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
273 {
274 //
275 // Out of memory!
276 //
277 DPRINT1("OUT OF PAGED POOL!!!\n");
278 KeReleaseGuardedMutex(&MmPagedPoolMutex);
279 return NULL;
280 }
281
282 //
283 // Check if we'll have to expand past the last PTE we have available
284 //
285 if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
286 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
287 {
288 //
289 // We can only support this much then
290 //
291 SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
292 MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
293 1;
294 ASSERT(SizeInPages < i);
295 i = SizeInPages;
296 }
297 else
298 {
299 //
300 // Otherwise, there is plenty of space left for this expansion
301 //
302 SizeInPages = i;
303 }
304
305 //
306 // Get the template PTE we'll use to expand
307 //
308 TempPte = ValidKernelPte;
309
310 //
311 // Get the first PTE in expansion space
312 //
313 PointerPte = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
314 BaseVa = MiPteToAddress(PointerPte);
315 BaseVaStart = BaseVa;
316
317 //
318 // Lock the PFN database and loop pages
319 //
320 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
321 do
322 {
323 //
324 // It should not already be valid
325 //
326 ASSERT(PointerPte->u.Hard.Valid == 0);
327
328 /* Request a page */
329 PageFrameNumber = MiRemoveAnyPage(0);
330 TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
331
332 //
333 // Save it into our double-buffered system page directory
334 //
335 /* This seems to be making the assumption that one PDE is one page long */
336 C_ASSERT(PAGE_SIZE == (PD_COUNT * (sizeof(MMPTE) * PDE_COUNT)));
337 MmSystemPagePtes[((ULONG_PTR)PointerPte & (PAGE_SIZE - 1)) /
338 sizeof(MMPTE)] = TempPte;
339
340 /* Initialize the PFN */
341 MiInitializePfnForOtherProcess(PageFrameNumber,
342 PointerPte,
343 MmSystemPageDirectory[(PointerPte - (PMMPTE)PDE_BASE) / PDE_COUNT]);
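/* ((PointerPte - PDE_BASE) is the PDE index; dividing by PDE_COUNT picks which of the
   PD_COUNT system page directory pages that PDE lives in, always page 0 on x86 which
   has a single page directory) */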
344
345 /* Write the actual PTE now */
346 ASSERT(TempPte.u.Hard.Valid == 1);
347 *PointerPte++ = TempPte;
348
349 //
350 // Move on to the next expansion address
351 //
352 BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
353 } while (--i > 0);
354
355 //
356 // Release the PFN database lock
357 //
358 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
359
360 //
361 // These pages are now available, clear their bits in the allocation bitmap
362 //
363 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
364 (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
365 MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
366 1024,
367 SizeInPages * 1024);
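// (at this point SizeInPages holds the number of page tables just mapped, so each
//  one contributes 1024 pages' worth of bits to the bitmap)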
368
369 //
370 // Update the next expansion location
371 //
372 MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;
373
374 //
375 // Zero out the newly available memory
376 //
377 RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);
378
379 //
380 // Now try consuming the pages again
381 //
382 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
383 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
384 SizeInPages,
385 0);
386 if (i == 0xFFFFFFFF)
387 {
388 //
389 // Out of memory!
390 //
391 DPRINT1("OUT OF PAGED POOL!!!\n");
392 KeReleaseGuardedMutex(&MmPagedPoolMutex);
393 return NULL;
394 }
395 }
396
397 //
398 // Update the pool hint if the request was just one page
399 //
400 if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
401
402 //
403 // Update the end bitmap so we know the bounds of this allocation when
404 // the time comes to free it
405 //
406 RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i + SizeInPages - 1);
407
408 //
409 // Now we can release the lock (it mainly protects the bitmap)
410 //
411 KeReleaseGuardedMutex(&MmPagedPoolMutex);
412
413 //
414 // Now figure out where this allocation starts
415 //
416 BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
417
418 //
419 // Flush the TLB
420 //
421 KeFlushEntireTb(TRUE, TRUE);
422
423 /* Setup a demand-zero writable PTE */
424 DPRINT1("Setting up demand zero\n");
425 MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
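/* No physical pages are committed here; each page is faulted in and zero-filled
   by the page fault handler on first access */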
426
427 //
428 // Find the first PTE and the PTE just past the last, then loop them all
429 //
430 PointerPte = MiAddressToPte(BaseVa);
431 StartPte = PointerPte + SizeInPages;
432 do
433 {
434 //
435 // Write the demand zero PTE and keep going
436 //
437 ASSERT(PointerPte->u.Hard.Valid == 0);
438 *PointerPte++ = TempPte;
439 } while (PointerPte < StartPte);
440
441 //
442 // Return the allocation address to the caller
443 //
444 return BaseVa;
445 }
446
447 //
448 // Allocations of fewer than 4 pages get their own free list; anything larger shares the last one
449 //
450 i = SizeInPages - 1;
451 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
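// (with 4 lists that means: 1-page requests start at list 0, 2 pages at list 1,
//  3 pages at list 2, and anything bigger starts at list 3)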
452
453 //
454 // Loop through all the free page lists based on the page index
455 //
456 NextHead = &MmNonPagedPoolFreeListHead[i];
457 LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
458
459 //
460 // Acquire the nonpaged pool lock
461 //
462 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
463 do
464 {
465 //
466 // Now loop through all the free page entries in this given list
467 //
468 NextEntry = NextHead->Flink;
469 while (NextEntry != NextHead)
470 {
471 //
472 // Grab the entry and see if it can handle our allocation
473 //
474 FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
475 if (FreeEntry->Size >= SizeInPages)
476 {
477 //
478 // It does, so consume the pages from here
479 //
480 FreeEntry->Size -= SizeInPages;
481
482 //
483 // The allocation will begin in this free page area
484 //
485 BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
486 (FreeEntry->Size << PAGE_SHIFT));
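// (the allocation is carved off the tail of the free run, which keeps the free
//  entry header in the run's first page untouched)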
487
488 //
489 // This is not a free page segment anymore
490 //
491 RemoveEntryList(&FreeEntry->List);
492
493 //
494 // However, check if it still has space left
495 //
496 if (FreeEntry->Size != 0)
497 {
498 //
499 // Insert it back into a different list, based on its pages
500 //
501 i = FreeEntry->Size - 1;
502 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
503 InsertTailList (&MmNonPagedPoolFreeListHead[i],
504 &FreeEntry->List);
505 }
506
507 //
508 // Grab the PTE for this allocation
509 //
510 PointerPte = MiAddressToPte(BaseVa);
511 ASSERT(PointerPte->u.Hard.Valid == 1);
512
513 //
514 // Grab the PFN entry for the first page of the allocation
515 //
516 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
517
518 //
519 // Now mark it as the beginning of an allocation
520 //
521 ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
522 Pfn1->u3.e1.StartOfAllocation = 1;
523
524 //
525 // Check if the allocation is larger than one page
526 //
527 if (SizeInPages != 1)
528 {
529 //
530 // Navigate to the last PFN entry and PTE
531 //
532 PointerPte += SizeInPages - 1;
533 ASSERT(PointerPte->u.Hard.Valid == 1);
534 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
535 }
536
537 //
538 // Mark this PFN as the last (might be the same as the first)
539 //
540 ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
541 Pfn1->u3.e1.EndOfAllocation = 1;
542
543 //
544 // Release the nonpaged pool lock, and return the allocation
545 //
546 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
547 return BaseVa;
548 }
549
550 //
551 // Try the next free page entry
552 //
553 NextEntry = FreeEntry->List.Flink;
554 }
555 } while (++NextHead < LastHead);
556
557 //
558 // If we got here, we're out of space.
559 // Start by releasing the lock
560 //
561 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
562
563 //
564 // Allocate some system PTEs
565 //
566 StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
567 PointerPte = StartPte;
568 if (StartPte == NULL)
569 {
570 //
571 // Ran out of memory
572 //
573 DPRINT1("Out of NP Expansion Pool\n");
574 return NULL;
575 }
576
577 //
578 // Acquire the pool lock now
579 //
580 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
581
582 //
583 // Lock the PFN database too
584 //
585 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
586 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
587
588 //
589 // Loop the pages
590 //
591 TempPte = ValidKernelPte;
592 do
593 {
594 /* Allocate a page */
595 PageFrameNumber = MiRemoveAnyPage(0);
596
597 /* Get the PFN entry for it and fill it out */
598 Pfn1 = MiGetPfnEntry(PageFrameNumber);
599 Pfn1->u3.e2.ReferenceCount = 1;
600 Pfn1->u2.ShareCount = 1;
601 Pfn1->PteAddress = PointerPte;
602 Pfn1->u3.e1.PageLocation = ActiveAndValid;
603 Pfn1->u4.VerifierAllocation = 0;
604
605 /* Write the PTE for it */
606 TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
607 ASSERT(PointerPte->u.Hard.Valid == 0);
608 ASSERT(TempPte.u.Hard.Valid == 1);
609 *PointerPte++ = TempPte;
610 } while (--SizeInPages > 0);
611
612 //
613 // This is the last page
614 //
615 Pfn1->u3.e1.EndOfAllocation = 1;
616
617 //
618 // Get the first page and mark it as such
619 //
620 Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
621 Pfn1->u3.e1.StartOfAllocation = 1;
622
623 //
624 // Release the PFN and nonpaged pool lock
625 //
626 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
627 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
628
629 //
630 // Return the address
631 //
632 return MiPteToAddress(StartPte);
633 }
634
635 ULONG
636 NTAPI
637 MiFreePoolPages(IN PVOID StartingVa)
638 {
639 PMMPTE PointerPte, StartPte;
640 PMMPFN Pfn1, StartPfn;
641 PFN_NUMBER FreePages, NumberOfPages;
642 KIRQL OldIrql;
643 PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
644 ULONG i, End;
645
646 //
647 // Handle paged pool
648 //
649 if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
650 {
651 //
652 // Calculate the offset from the beginning of paged pool, and convert it
653 // into pages
654 //
655 i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
656 End = i;
657
658 //
659 // Now use the end bitmap to scan until we find a set bit, meaning that
660 // this allocation finishes here
661 //
662 while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
663
664 //
665 // Now calculate the total number of pages this allocation spans
666 //
667 NumberOfPages = End - i + 1;
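// (for instance, an allocation starting at page 10 whose end bit is at page 13
//  spans 13 - 10 + 1 = 4 pages)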
668
669 //
670 // Acquire the paged pool lock
671 //
672 KeAcquireGuardedMutex(&MmPagedPoolMutex);
673
674 //
675 // Clear the allocation and free bits
676 //
677 RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i);
678 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
679
680 //
681 // Update the hint if we need to
682 //
683 if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
684
685 //
686 // Release the lock protecting the bitmaps
687 //
688 KeReleaseGuardedMutex(&MmPagedPoolMutex);
689
690 //
691 // And finally return the number of pages freed
692 //
693 return NumberOfPages;
694 }
695
696 //
697 // Get the first PTE and its corresponding PFN entry
698 //
699 StartPte = PointerPte = MiAddressToPte(StartingVa);
700 StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
701
702 //
703 // Loop until we find the last PTE
704 //
705 while (Pfn1->u3.e1.EndOfAllocation == 0)
706 {
707 //
708 // Keep going
709 //
710 PointerPte++;
711 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
712 }
713
714 //
715 // Now we know how many pages we have
716 //
717 NumberOfPages = PointerPte - StartPte + 1;
718
719 //
720 // Acquire the nonpaged pool lock
721 //
722 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
723
724 //
725 // Mark the first and last PTEs as not part of an allocation anymore
726 //
727 StartPfn->u3.e1.StartOfAllocation = 0;
728 Pfn1->u3.e1.EndOfAllocation = 0;
729
730 //
731 // Assume we will free as many pages as the allocation was
732 //
733 FreePages = NumberOfPages;
734
735 //
736 // Peek one page past the end of the allocation
737 //
738 PointerPte++;
739
740 //
741 // Guard against going past initial nonpaged pool
742 //
743 if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
744 {
745 //
746 // This page is on the outskirts of initial nonpaged pool, so ignore it
747 //
748 Pfn1 = NULL;
749 }
750 else
751 {
752 //
753 // Otherwise, our entire allocation must've fit within the initial non
754 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
755 // the next allocation
756 //
757 ASSERT((ULONG_PTR)StartingVa + (NumberOfPages << PAGE_SHIFT) <= (ULONG_PTR)MmNonPagedPoolEnd);
758 if (PointerPte->u.Hard.Valid == 1)
759 {
760 //
761 // It's either expansion or initial: get the PFN entry
762 //
763 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
764 }
765 else
766 {
767 //
768 // This means we've reached the guard page that protects the end of
769 // the expansion nonpaged pool
770 //
771 Pfn1 = NULL;
772 }
773
774 }
775
776 //
777 // Check if the page right after us starts another allocation
778 //
779 if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
780 {
781 //
782 // It doesn't, so it must belong to a free entry we can merge with
783 //
784 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
785 (NumberOfPages << PAGE_SHIFT));
786 ASSERT(FreeEntry->Owner == FreeEntry);
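// (the first page of a free run is its own owner, so this checks that we really
//  landed on the descriptor of the run that follows us)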
787
788 //
789 // Consume this entry's pages, and remove it from its free list
790 //
791 FreePages += FreeEntry->Size;
792 RemoveEntryList (&FreeEntry->List);
793 }
794
795 //
796 // Now get the official free entry we'll create for the caller's allocation
797 //
798 FreeEntry = StartingVa;
799
800 //
801 // Check if our allocation is the very first pool page
802 //
803 if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
804 {
805 //
806 // Then we can't do anything or we'll risk underflowing
807 //
808 Pfn1 = NULL;
809 }
810 else
811 {
812 //
813 // Otherwise, get the PTE for the page right before our allocation
814 //
815 PointerPte -= NumberOfPages + 1;
816 if (PointerPte->u.Hard.Valid == 1)
817 {
818 //
819 // It's either expansion or initial nonpaged pool, get the PFN entry
820 //
821 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
822 }
823 else
824 {
825 //
826 // We must've reached the guard page, so don't risk touching it
827 //
828 Pfn1 = NULL;
829 }
830 }
831
832 //
833 // Check if there is a valid PFN entry for the page before the allocation
834 // and then check if this page was actually the end of an allocation.
835 // If it wasn't, then we know for sure it's a free page
836 //
837 if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
838 {
839 //
840 // Get the free entry descriptor for that given page range
841 //
842 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
843 FreeEntry = FreeEntry->Owner;
844
845 //
846 // Check if the entry is small enough to be indexed on a free list
847 // If it is, we'll want to re-insert it, since we're about to
848 // collapse our pages on top of it, which will change its count
849 //
850 if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
851 {
852 //
853 // Remove the list from where it is now
854 //
855 RemoveEntryList(&FreeEntry->List);
856
857 //
858 // Update its size
859 //
860 FreeEntry->Size += FreePages;
861
862 //
863 // And now find the new appropriate list to place it in
864 //
865 i = (ULONG)(FreeEntry->Size - 1);
866 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
867
868 //
869 // Do it
870 //
871 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
872 }
873 else
874 {
875 //
876 // Otherwise, just combine our free pages into this entry
877 //
878 FreeEntry->Size += FreePages;
879 }
880 }
881
882 //
883 // Check if we couldn't merge into a preceding free entry; if so, we become one ourselves
884 //
885 if (FreeEntry == StartingVa)
886 {
887 //
888 // Well, now we are a free entry. At worst we just have our newly freed
889 // pages, at best we have our pages plus whatever entry came after us
890 //
891 FreeEntry->Size = FreePages;
892
893 //
894 // Find the appropriate list we should be on
895 //
896 i = FreeEntry->Size - 1;
897 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
898
899 //
900 // And insert us
901 //
902 InsertTailList (&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
903 }
904
905 //
906 // Just a sanity check
907 //
908 ASSERT(FreePages != 0);
909
910 //
911 // Now walk every page of this free run and point it back at the owning
912 // free entry descriptor
913 //
914 NextEntry = StartingVa;
915 LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
916 do
917 {
918 //
919 // Link back to the parent free entry, and keep going
920 //
921 NextEntry->Owner = FreeEntry;
922 NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
923 } while (NextEntry != LastEntry);
924
925 //
926 // We're done, release the lock and let the caller know how much we freed
927 //
928 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
929 return NumberOfPages;
930 }
931
932
933 BOOLEAN
934 NTAPI
935 MiRaisePoolQuota(IN POOL_TYPE PoolType,
936 IN ULONG CurrentMaxQuota,
937 OUT PULONG NewMaxQuota)
938 {
939 //
940 // Not implemented
941 //
942 UNIMPLEMENTED;
943 *NewMaxQuota = CurrentMaxQuota + 65536;
944 return TRUE;
945 }
946
947 /* PUBLIC FUNCTIONS ***********************************************************/
948
949 /*
950 * @unimplemented
951 */
952 PVOID
953 NTAPI
954 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
955 IN ULONG PoolTag)
956 {
957 UNIMPLEMENTED;
958 return NULL;
959 }
960
961 /*
962 * @unimplemented
963 */
964 VOID
965 NTAPI
966 MmFreeMappingAddress(IN PVOID BaseAddress,
967 IN ULONG PoolTag)
968 {
969 UNIMPLEMENTED;
970 }
971
972 /* EOF */