/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::POOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
KGUARDED_MUTEX MmPagedPoolMutex;
MM_PAGED_POOL_INFO MmPagedPoolInfo;
SIZE_T MmAllocatedNonPagedPool;
ULONG MmSpecialPoolTag;
ULONG MmConsumedPoolPercentage;
BOOLEAN MmProtectFreedNonPagedPool;

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiInitializeNonPagedPoolThresholds(VOID)
{
    PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;

    /* Default low threshold of 8MB or one third of nonpaged pool */
    MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
    MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);

    /* Default high threshold of 20MB or 50% */
    MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
    MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
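
    /*
     * Worked example (illustrative): with a 64MB nonpaged pool maximum, the
     * low threshold stays at 8MB (8MB < 64MB / 3) and the high threshold at
     * 20MB (20MB < 64MB / 2). On a small 12MB pool the caps kick in and the
     * thresholds become 4MB and 6MB respectively.
     */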
    ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
}

VOID
NTAPI
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePoolInPages;

    /* Lock paged pool */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* The free size is the total paged pool size minus what has been allocated */
    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* Check the initial high state */
    if (FreePoolInPages >= MiHighPagedPoolThreshold)
    {
        /* We have plenty of pool */
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't */
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Check the initial low state */
    if (FreePoolInPages <= MiLowPagedPoolThreshold)
    {
        /* We're very low on free pool memory */
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We're not */
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Release the paged pool lock */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Now it's time for the nonpaged pool lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages are the maximum minus what's been allocated */
    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Check if we have plenty */
    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear the event */
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Check if we have very little */
    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear it */
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* We're done, release the nonpaged pool lock */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}

VOID
NTAPI
MiInitializeNonPagedPool(VOID)
{
    ULONG i;
    PFN_NUMBER PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
    }
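
    //
    // Note (explanatory): every page of a free run stores a pointer back to
    // the run's first page (its Owner). When a neighboring allocation is later
    // freed, MiFreePoolPages only needs to look at the adjacent page to find
    // the descriptor that owns the whole free run and coalesce with it.
    //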

    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge =
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}

PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER SizeInPages, PageFrameNumber;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    MMPTE TempPte;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = BYTES_TO_PAGES(SizeInBytes);

    //
    // Handle paged pool
    //
    if (PoolType == PagedPool)
    {
        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Get the number of page tables (PDEs) needed for the expansion
            //
            i = ((SizeInPages - 1) / 1024) + 1;
            DPRINT1("Paged pool expansion: %d %x\n", i, SizeInPages);
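
            //
            // Illustrative: with 1024 PTEs per page table (the literal used
            // here), a request for 3000 pages needs ((3000 - 1) / 1024) + 1,
            // i.e. 3 page tables' worth of expansion.
            //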

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
                              MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
                              1;
                ASSERT(SizeInPages < i);
                i = SizeInPages;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                SizeInPages = i;
            }

            //
            // Get the template PTE we'll use to expand
            //
            TempPte = ValidKernelPte;

            //
            // Get the first PTE in expansion space
            //
            PointerPte = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPteToAddress(PointerPte);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPte->u.Hard.Valid == 0);

                /* Request a page */
                PageFrameNumber = MiRemoveAnyPage(0);
                TempPte.u.Hard.PageFrameNumber = PageFrameNumber;

                //
                // Save it into our double-buffered system page directory
                //
                /* This seems to be making the assumption that one PDE is one page long */
                C_ASSERT(PAGE_SIZE == (PD_COUNT * (sizeof(MMPTE) * PDE_COUNT)));
                MmSystemPagePtes[(ULONG_PTR)PointerPte & (PAGE_SIZE - 1) /
                                 sizeof(MMPTE)] = TempPte;
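
                //
                // Note (explanatory): MmSystemPagePtes acts as the kernel's own
                // copy of the PDEs that map paged pool. The new PDE is stashed
                // here in addition to the live page directory below, so that
                // address spaces whose page directories predate this expansion
                // can pick up the mapping later when they fault on it.
                //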

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               PointerPte,
                                               MmSystemPageDirectory[(PointerPte - (PMMPTE)PDE_BASE) / PDE_COUNT]);

                /* Write the actual PTE now */
                ASSERT(TempPte.u.Hard.Valid == 1);
                *PointerPte++ = TempPte;

                //
                // Move on to the next expansion address
                //
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
            } while (--i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                          MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                         1024,
                         SizeInPages * 1024);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            SizeInPages = BYTES_TO_PAGES(SizeInBytes);
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i + SizeInPages - 1);
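
        //
        // Note (explanatory): only the bit for the last page of the allocation
        // is recorded here. MiFreePoolPages later scans EndOfPagedPoolBitmap
        // forward from the starting page to recover how many pages to release.
        //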

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        DPRINT1("Setting up demand zero\n");
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            ASSERT(PointerPte->u.Hard.Valid == 0);
            *PointerPte++ = TempPte;
        } while (PointerPte < StartPte);
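
        //
        // Note (explanatory): the range is not backed by physical memory yet.
        // Each PTE is demand-zero, so a zeroed page is only attached by the
        // page-fault handler the first time the caller touches it.
        //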

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
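
    //
    // Illustrative: with MI_MAX_FREE_PAGE_LISTS == 4, a 1-page request starts
    // at list 0, 2 pages at list 1, 3 pages at list 2, and anything larger is
    // clamped to list 3, which also holds all the large free runs.
    //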

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                //
                // This is not a free page segment anymore
                //
                RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it still has space left
                //
                if (FreeEntry->Size != 0)
                {
                    //
                    // Insert it back into a different list, based on its pages
                    //
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
                    InsertTailList(&MmNonPagedPoolFreeListHead[i],
                                   &FreeEntry->List);
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for the first page
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        PageFrameNumber = MiRemoveAnyPage(0);

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        ASSERT(PointerPte->u.Hard.Valid == 0);
        ASSERT(TempPte.u.Hard.Valid == 1);
        *PointerPte++ = TempPte;
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}

ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_NUMBER FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans
        //
        NumberOfPages = End - i + 1;
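
        //
        // Illustrative: for a 3-page allocation that started at page index 5,
        // the allocation path set bit 7 in EndOfPagedPoolBitmap, so the scan
        // above stops with End == 7 and NumberOfPages == 3.
        //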

        /* Delete the actual pages */
        PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
        FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
        ASSERT(FreePages == NumberOfPages);

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the allocation and free bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }

    //
    // Get the first PTE and its corresponding PFN entry
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = PointerPte - StartPte + 1;

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PTEs as not part of an allocation anymore
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, our entire allocation must've fit within the initial non
        // paged pool, or the expansion nonpaged pool, so get the PFN entry of
        // the next allocation
        //
        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if the page right after us starts another allocation
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // It doesn't, so it must belong to a free run; locate its descriptor
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Owner == FreeEntry);

        //
        // Consume this entry's pages, and remove it from its free list
        //
        FreePages += FreeEntry->Size;
        RemoveEntryList(&FreeEntry->List);
    }

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation
    // and then check if this page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        FreeEntry = FreeEntry->Owner;

        //
        // Check if the entry is small enough to be indexed on a free list.
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
        {
            //
            // Remove the entry from the list it is on now
            //
            RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = (ULONG)(FreeEntry->Size - 1);
            if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

            //
            // Do it
            //
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }

    //
    // Check if we could not merge with the preceding free run; in that case
    // our own pages become the new free entry
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages, at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = FreeEntry->Size - 1;
        if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

        //
        // And insert us
        //
        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Now walk every page in the freed range. Each of them becomes part of
    // this free run.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);
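
    //
    // Note (explanatory): after the loop above, every page of the freed (and
    // possibly coalesced) run points at the same owning entry, so a later free
    // of a neighboring allocation can find this run and merge with it too.
    //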

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}


BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    UNIMPLEMENTED;
}

/* EOF */