1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/pool.c
5 * PURPOSE: ARM Memory Manager Pool Allocator
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::POOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
22 PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
23 PVOID MmNonPagedPoolEnd0;
24 PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
25 KGUARDED_MUTEX MmPagedPoolMutex;
26 MM_PAGED_POOL_INFO MmPagedPoolInfo;
27 SIZE_T MmAllocatedNonPagedPool;
28 ULONG MmSpecialPoolTag;
29 ULONG MmConsumedPoolPercentage;
30 BOOLEAN MmProtectFreedNonPagedPool;
31
32 /* PRIVATE FUNCTIONS **********************************************************/
33
34 VOID
35 NTAPI
36 MiInitializeNonPagedPoolThresholds(VOID)
37 {
38 PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;
39
40 /* Default low threshold of 8MB or one third of nonpaged pool */
41 MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
42 MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);
43
44 /* Default high threshold of 20MB or 50% */
45 MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
46 MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
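/* For example, a 512MB nonpaged pool keeps the 8MB/20MB defaults, while a 12MB pool gets clamped to 4MB low and 6MB high */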
47 ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
48 }
49
50 VOID
51 NTAPI
52 MiInitializePoolEvents(VOID)
53 {
54 KIRQL OldIrql;
55 PFN_NUMBER FreePoolInPages;
56
57 /* Lock paged pool */
58 KeAcquireGuardedMutex(&MmPagedPoolMutex);
59
60 /* The total size of the paged pool minus the allocated size is what's free */
61 FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
62
63 /* Check the initial high state */
64 if (FreePoolInPages >= MiHighPagedPoolThreshold)
65 {
66 /* We have plenty of pool */
67 KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
68 }
69 else
70 {
71 /* We don't */
72 KeClearEvent(MiHighPagedPoolEvent);
73 }
74
75 /* Check the initial low state */
76 if (FreePoolInPages <= MiLowPagedPoolThreshold)
77 {
78 /* We're very low in free pool memory */
79 KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
80 }
81 else
82 {
83 /* We're not */
84 KeClearEvent(MiLowPagedPoolEvent);
85 }
86
87 /* Release the paged pool lock */
88 KeReleaseGuardedMutex(&MmPagedPoolMutex);
89
90 /* Now it's time for the nonpaged pool lock */
91 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
92
93 /* Free pages are the maximum minus what's been allocated */
94 FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
95
96 /* Check if we have plenty */
97 if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
98 {
99 /* We do, set the event */
100 KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
101 }
102 else
103 {
104 /* We don't, clear the event */
105 KeClearEvent(MiHighNonPagedPoolEvent);
106 }
107
108 /* Check if we have very little */
109 if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
110 {
111 /* We do, set the event */
112 KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
113 }
114 else
115 {
116 /* We don't, clear it */
117 KeClearEvent(MiLowNonPagedPoolEvent);
118 }
119
120 /* We're done, release the nonpaged pool lock */
121 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
122 }
123
124 VOID
125 NTAPI
126 MiInitializeNonPagedPool(VOID)
127 {
128 ULONG i;
129 PFN_NUMBER PoolPages;
130 PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
131 PMMPTE PointerPte;
132 PAGED_CODE();
133
134 //
135 // We keep 4 lists of free pages (4 lists help avoid contention)
136 //
137 for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
138 {
139 //
140 // Initialize each of them
141 //
142 InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
143 }
144
145 //
146 // Calculate how many pages the initial nonpaged pool has
147 //
148 PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
149 MmNumberOfFreeNonPagedPool = PoolPages;
150
151 //
152 // Initialize the first free entry
153 //
154 FreeEntry = MmNonPagedPoolStart;
155 FirstEntry = FreeEntry;
156 FreeEntry->Size = PoolPages;
157 FreeEntry->Owner = FirstEntry;
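//
// (Every page of a free run carries an Owner pointer back to the entry at
// the start of the run; MiFreePoolPages below relies on this to coalesce
// neighbouring free runs)
//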
158
159 //
160 // Insert it into the last list
161 //
162 InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
163 &FreeEntry->List);
164
165 //
166 // Now create free entries for every single other page
167 //
168 while (PoolPages-- > 1)
169 {
170 //
171 // Link them all back to the original entry
172 //
173 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
174 FreeEntry->Owner = FirstEntry;
175 }
176
177 //
178 // Validate and remember first allocated pool page
179 //
180 PointerPte = MiAddressToPte(MmNonPagedPoolStart);
181 ASSERT(PointerPte->u.Hard.Valid == 1);
182 MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
183
184 //
185 // Keep track of where initial nonpaged pool ends
186 //
187 MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
188 MmSizeOfNonPagedPoolInBytes);
189
190 //
191 // Validate and remember last allocated pool page
192 //
193 PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
194 ASSERT(PointerPte->u.Hard.Valid == 1);
195 MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
196
197 //
198 // Validate the first nonpaged pool expansion page (which is a guard page)
199 //
200 PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
201 ASSERT(PointerPte->u.Hard.Valid == 0);
202
203 //
204 // Calculate the size of the expansion region alone
205 //
206 MiExpansionPoolPagesInitialCharge =
207 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);
208
209 //
210 // Remove 2 pages, since there's a guard page on top and on the bottom
211 //
212 MiExpansionPoolPagesInitialCharge -= 2;
213
214 //
215 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
216 // guard page on top so make sure to skip it. The bottom guard page will be
217 // guaranteed by the fact our size is off by one.
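// Expansion allocations made by MiAllocatePoolPages fall back to this
// reserved PTE space whenever the initial nonpaged pool runs dry.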
218 //
219 MiInitializeSystemPtes(PointerPte + 1,
220 MiExpansionPoolPagesInitialCharge,
221 NonPagedPoolExpansion);
222 }
223
224 PVOID
225 NTAPI
226 MiAllocatePoolPages(IN POOL_TYPE PoolType,
227 IN SIZE_T SizeInBytes)
228 {
229 PFN_NUMBER SizeInPages, PageFrameNumber;
230 ULONG i;
231 KIRQL OldIrql;
232 PLIST_ENTRY NextEntry, NextHead, LastHead;
233 PMMPTE PointerPte, StartPte;
234 MMPTE TempPte;
235 PMMPFN Pfn1;
236 PVOID BaseVa, BaseVaStart;
237 PMMFREE_POOL_ENTRY FreeEntry;
238 PKSPIN_LOCK_QUEUE LockQueue;
239
240 //
241 // Figure out how big the allocation is in pages
242 //
243 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
244
245 //
246 // Handle paged pool
247 //
248 if (PoolType == PagedPool)
249 {
250 //
251 // Lock the paged pool mutex
252 //
253 KeAcquireGuardedMutex(&MmPagedPoolMutex);
254
255 //
256 // Find some empty allocation space
257 //
258 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
259 SizeInPages,
260 MmPagedPoolInfo.PagedPoolHint);
261 if (i == 0xFFFFFFFF)
262 {
263 //
264 // Figure out how many page tables (PDEs) this request needs; each PDE maps 1024 pages
265 //
266 i = ((SizeInPages - 1) / 1024) + 1;
267 DPRINT1("Paged pool expansion: %d %x\n", i, SizeInPages);
268
269 //
270 // Check if there is enough paged pool expansion space left
271 //
272 if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
273 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
274 {
275 //
276 // Out of memory!
277 //
278 DPRINT1("OUT OF PAGED POOL!!!\n");
279 KeReleaseGuardedMutex(&MmPagedPoolMutex);
280 return NULL;
281 }
282
283 //
284 // Check if we'll have to expand past the last PTE we have available
285 //
286 if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
287 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
288 {
289 //
290 // We can only support this much then
291 //
292 SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
293 MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
294 1;
295 ASSERT(SizeInPages < i);
296 i = SizeInPages;
297 }
298 else
299 {
300 //
301 // Otherwise, there is plenty of space left for this expansion
302 //
303 SizeInPages = i;
304 }
305
306 //
307 // Get the template PTE we'll use to expand
308 //
309 TempPte = ValidKernelPte;
310
311 //
312 // Get the first PTE in expansion space
313 //
314 PointerPte = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
315 BaseVa = MiPteToAddress(PointerPte);
316 BaseVaStart = BaseVa;
317
318 //
319 // Lock the PFN database and loop pages
320 //
321 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
322 do
323 {
324 //
325 // It should not already be valid
326 //
327 ASSERT(PointerPte->u.Hard.Valid == 0);
328
329 /* Request a page */
330 PageFrameNumber = MiRemoveAnyPage(0);
331 TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
332
333 //
334 // Save it into our double-buffered system page directory
335 //
336 #ifndef _M_AMD64
337 /* This seems to be making the assumption that one PDE is one page long */
338 C_ASSERT(PAGE_SIZE == (PD_COUNT * (sizeof(MMPTE) * PDE_COUNT)));
339 #endif
340 MmSystemPagePtes[((ULONG_PTR)PointerPte & (PAGE_SIZE - 1)) /
341 sizeof(MMPTE)] = TempPte;
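//
// (This master copy is what gets propagated into a process's page
// directory when it faults on a paged pool address, so the new page
// table becomes visible in every address space)
//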
342
343 /* Initialize the PFN */
344 MiInitializePfnForOtherProcess(PageFrameNumber,
345 PointerPte,
346 MmSystemPageDirectory[(PointerPte - (PMMPTE)PDE_BASE) / PDE_COUNT]);
347
348 /* Write the actual PTE now */
349 MI_WRITE_VALID_PTE(PointerPte++, TempPte);
350
351 //
352 // Move on to the next expansion address
353 //
354 BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
355 } while (--i > 0);
356
357 //
358 // Release the PFN database lock
359 //
360 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
361
362 //
363 // These pages are now available, so clear their bits in the allocation bitmap
364 //
365 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
366 (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
367 MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
368 1024,
369 SizeInPages * 1024);
370
371 //
372 // Update the next expansion location
373 //
374 MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;
375
376 //
377 // Zero out the newly available memory
378 //
379 RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);
380
381 //
382 // Now try consuming the pages again
383 //
384 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
385 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
386 SizeInPages,
387 0);
388 if (i == 0xFFFFFFFF)
389 {
390 //
391 // Out of memory!
392 //
393 DPRINT1("OUT OF PAGED POOL!!!\n");
394 KeReleaseGuardedMutex(&MmPagedPoolMutex);
395 return NULL;
396 }
397 }
398
399 //
400 // Update the pool hint if the request was just one page
401 //
402 if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
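//
// (Presumably one-page allocations are the common case, so pointing the
// hint just past the last one keeps the next bitmap search short)
//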
403
404 //
405 // Update the end bitmap so we know the bounds of this allocation when
406 // the time comes to free it
407 //
408 RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i + SizeInPages - 1);
409
410 //
411 // Now we can release the lock (it mainly protects the bitmap)
412 //
413 KeReleaseGuardedMutex(&MmPagedPoolMutex);
414
415 //
416 // Now figure out where this allocation starts
417 //
418 BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
419
420 //
421 // Flush the TLB
422 //
423 KeFlushEntireTb(TRUE, TRUE);
424
425 /* Setup a demand-zero writable PTE */
426 MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
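//
// (No physical pages are consumed yet for these demand-zero PTEs; the page
// fault handler materializes zeroed pages when the caller first touches
// the allocation)
//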
427
428 //
429 // Get the first PTE and the one just past the last, then loop through them all
430 //
431 PointerPte = MiAddressToPte(BaseVa);
432 StartPte = PointerPte + SizeInPages;
433 do
434 {
435 //
436 // Write the demand zero PTE and keep going
437 //
438 ASSERT(PointerPte->u.Hard.Valid == 0);
439 *PointerPte++ = TempPte;
440 } while (PointerPte < StartPte);
441
442 //
443 // Return the allocation address to the caller
444 //
445 return BaseVa;
446 }
447
448 //
449 // Allocations of less than 4 pages go into their individual buckets
450 //
451 i = SizeInPages - 1;
452 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
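//
// (With four free lists, runs of exactly 1, 2 and 3 pages get their own
// buckets, and the last list collects every larger run; the search below
// starts at the matching bucket and falls through to the larger ones)
//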
453
454 //
455 // Loop through all the free page lists based on the page index
456 //
457 NextHead = &MmNonPagedPoolFreeListHead[i];
458 LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
459
460 //
461 // Acquire the nonpaged pool lock
462 //
463 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
464 do
465 {
466 //
467 // Now loop through all the free page entries in this given list
468 //
469 NextEntry = NextHead->Flink;
470 while (NextEntry != NextHead)
471 {
472 //
473 // Grab the entry and see if it can handle our allocation
474 //
475 FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
476 if (FreeEntry->Size >= SizeInPages)
477 {
478 //
479 // It does, so consume the pages from here
480 //
481 FreeEntry->Size -= SizeInPages;
482
483 //
484 // The allocation will begin in this free page area
485 //
486 BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
487 (FreeEntry->Size << PAGE_SHIFT));
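//
// (The allocation is carved from the tail of the free run, so the run's
// base address, and with it the Owner pointers of the remaining pages,
// stay valid)
//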
488
489 //
490 // This is not a free page segment anymore
491 //
492 RemoveEntryList(&FreeEntry->List);
493
494 //
495 // However, check if it's still got space left
496 //
497 if (FreeEntry->Size != 0)
498 {
499 //
500 // Insert it back into a different list, based on its pages
501 //
502 i = FreeEntry->Size - 1;
503 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
504 InsertTailList (&MmNonPagedPoolFreeListHead[i],
505 &FreeEntry->List);
506 }
507
508 //
509 // Grab the PTE for this allocation
510 //
511 PointerPte = MiAddressToPte(BaseVa);
512 ASSERT(PointerPte->u.Hard.Valid == 1);
513
514 //
515 // Grab the PFN entry for the first page of the allocation
516 //
517 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
518
519 //
520 // Now mark it as the beginning of an allocation
521 //
522 ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
523 Pfn1->u3.e1.StartOfAllocation = 1;
524
525 //
526 // Check if the allocation is larger than one page
527 //
528 if (SizeInPages != 1)
529 {
530 //
531 // Navigate to the last PFN entry and PTE
532 //
533 PointerPte += SizeInPages - 1;
534 ASSERT(PointerPte->u.Hard.Valid == 1);
535 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
536 }
537
538 //
539 // Mark this PFN as the last (might be the same as the first)
540 //
541 ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
542 Pfn1->u3.e1.EndOfAllocation = 1;
543
544 //
545 // Release the nonpaged pool lock, and return the allocation
546 //
547 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
548 return BaseVa;
549 }
550
551 //
552 // Try the next free page entry
553 //
554 NextEntry = FreeEntry->List.Flink;
555 }
556 } while (++NextHead < LastHead);
557
558 //
559 // If we got here, we're out of space.
560 // Start by releasing the lock
561 //
562 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
563
564 //
565 // Allocate some system PTEs
566 //
567 StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
568 PointerPte = StartPte;
569 if (StartPte == NULL)
570 {
571 //
572 // Ran out of memory
573 //
574 DPRINT1("Out of NP Expansion Pool\n");
575 return NULL;
576 }
577
578 //
579 // Acquire the pool lock now
580 //
581 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
582
583 //
584 // Lock the PFN database too
585 //
586 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
587 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
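//
// (We are already at DISPATCH_LEVEL from the nonpaged pool lock, so the
// PFN lock is taken with the AtDpcLevel variant)
//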
588
589 //
590 // Loop the pages
591 //
592 TempPte = ValidKernelPte;
593 do
594 {
595 /* Allocate a page */
596 PageFrameNumber = MiRemoveAnyPage(0);
597
598 /* Get the PFN entry for it and fill it out */
599 Pfn1 = MiGetPfnEntry(PageFrameNumber);
600 Pfn1->u3.e2.ReferenceCount = 1;
601 Pfn1->u2.ShareCount = 1;
602 Pfn1->PteAddress = PointerPte;
603 Pfn1->u3.e1.PageLocation = ActiveAndValid;
604 Pfn1->u4.VerifierAllocation = 0;
605
606 /* Write the PTE for it */
607 TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
608 MI_WRITE_VALID_PTE(PointerPte++, TempPte);
609 } while (--SizeInPages > 0);
610
611 //
612 // This is the last page
613 //
614 Pfn1->u3.e1.EndOfAllocation = 1;
615
616 //
617 // Get the first page and mark it as such
618 //
619 Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
620 Pfn1->u3.e1.StartOfAllocation = 1;
621
622 //
623 // Release the PFN and nonpaged pool lock
624 //
625 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
626 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
627
628 //
629 // Return the address
630 //
631 return MiPteToAddress(StartPte);
632 }
633
634 ULONG
635 NTAPI
636 MiFreePoolPages(IN PVOID StartingVa)
637 {
638 PMMPTE PointerPte, StartPte;
639 PMMPFN Pfn1, StartPfn;
640 PFN_NUMBER FreePages, NumberOfPages;
641 KIRQL OldIrql;
642 PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
643 ULONG i, End;
644
645 //
646 // Handle paged pool
647 //
648 if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
649 {
650 //
651 // Calculate the offset from the beginning of paged pool, and convert it
652 // into pages
653 //
654 i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
655 End = i;
656
657 //
658 // Now use the end bitmap to scan until we find a set bit, meaning that
659 // this allocation finishes here
660 //
661 while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
662
663 //
664 // Now calculate the total number of pages this allocation spans
665 //
666 NumberOfPages = End - i + 1;
667
668 /* Delete the actual pages */
669 PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
670 FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
671 ASSERT(FreePages == NumberOfPages);
672
673 //
674 // Acquire the paged pool lock
675 //
676 KeAcquireGuardedMutex(&MmPagedPoolMutex);
677
678 //
679 // Clear the allocation and free bits
680 //
681 RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i);
682 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
683
684 //
685 // Update the hint if we need to
686 //
687 if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
688
689 //
690 // Release the lock protecting the bitmaps
691 //
692 KeReleaseGuardedMutex(&MmPagedPoolMutex);
693
694 //
695 // And finally return the number of pages freed
696 //
697 return NumberOfPages;
698 }
699
700 //
701 // Get the first PTE and its corresponding PFN entry
702 //
703 StartPte = PointerPte = MiAddressToPte(StartingVa);
704 StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
705
706 //
707 // Loop until we find the last PTE
708 //
709 while (Pfn1->u3.e1.EndOfAllocation == 0)
710 {
711 //
712 // Keep going
713 //
714 PointerPte++;
715 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
716 }
717
718 //
719 // Now we know how many pages we have
720 //
721 NumberOfPages = PointerPte - StartPte + 1;
722
723 //
724 // Acquire the nonpaged pool lock
725 //
726 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
727
728 //
729 // Mark the first and last PTEs as not part of an allocation anymore
730 //
731 StartPfn->u3.e1.StartOfAllocation = 0;
732 Pfn1->u3.e1.EndOfAllocation = 0;
733
734 //
735 // Assume we will free as many pages as the allocation was
736 //
737 FreePages = NumberOfPages;
738
739 //
740 // Peek one page past the end of the allocation
741 //
742 PointerPte++;
743
744 //
745 // Guard against going past initial nonpaged pool
746 //
747 if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
748 {
749 //
750 // The allocation ends at the very last page of initial nonpaged pool, so there is nothing after it to merge with
751 //
752 Pfn1 = NULL;
753 }
754 else
755 {
756 //
757 // Otherwise, our entire allocation must've fit within the initial non
758 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
759 // the page that comes right after it
760 //
761 ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
762 if (PointerPte->u.Hard.Valid == 1)
763 {
764 //
765 // It's either expansion or initial: get the PFN entry
766 //
767 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
768 }
769 else
770 {
771 //
772 // This means we've reached the guard page that protects the end of
773 // the expansion nonpaged pool
774 //
775 Pfn1 = NULL;
776 }
777
778 }
779
780 //
781 // Check if this allocation actually exists
782 //
783 if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
784 {
785 //
786 // It doesn't, so we should actually locate a free entry descriptor
787 //
788 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
789 (NumberOfPages << PAGE_SHIFT));
790 ASSERT(FreeEntry->Owner == FreeEntry);
791
792 //
793 // Consume this entry's pages, and remove it from its free list
794 //
795 FreePages += FreeEntry->Size;
796 RemoveEntryList (&FreeEntry->List);
797 }
798
799 //
800 // Now get the official free entry we'll create for the caller's allocation
801 //
802 FreeEntry = StartingVa;
803
804 //
805 // Check if our allocation starts at the very first pool page
806 //
807 if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
808 {
809 //
810 // Then we can't do anything or we'll risk underflowing
811 //
812 Pfn1 = NULL;
813 }
814 else
815 {
816 //
817 // Otherwise, get the PTE for the page right before our allocation
818 //
819 PointerPte -= NumberOfPages + 1;
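//
// (PointerPte was left pointing one page past the end of the allocation
// above, so stepping back NumberOfPages + 1 PTEs lands exactly one page
// before the start)
//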
820 if (PointerPte->u.Hard.Valid == 1)
821 {
822 //
823 // It's either expansion or initial nonpaged pool, get the PFN entry
824 //
825 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
826 }
827 else
828 {
829 //
830 // We must've reached the guard page, so don't risk touching it
831 //
832 Pfn1 = NULL;
833 }
834 }
835
836 //
837 // Check if there is a valid PFN entry for the page before the allocation
838 // and then check if this page was actually the end of an allocation.
839 // If it wasn't, then we know for sure it's a free page
840 //
841 if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
842 {
843 //
844 // Get the free entry descriptor for that given page range
845 //
846 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
847 FreeEntry = FreeEntry->Owner;
848
849 //
850 // Check if the entry is small enough to be indexed on a free list
851 // If it is, we'll want to re-insert it, since we're about to
852 // collapse our pages on top of it, which will change its count
853 //
854 if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
855 {
856 //
857 // Remove the list from where it is now
858 //
859 RemoveEntryList(&FreeEntry->List);
860
861 //
862 // Update its size
863 //
864 FreeEntry->Size += FreePages;
865
866 //
867 // And now find the new appropriate list to place it in
868 //
869 i = (ULONG)(FreeEntry->Size - 1);
870 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
871
872 //
873 // Do it
874 //
875 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
876 }
877 else
878 {
879 //
880 // Otherwise, just combine our free pages into this entry
881 //
882 FreeEntry->Size += FreePages;
883 }
884 }
885
886 //
887 // Check if no coalescing with the previous entry happened, in which case our pages become a free entry of their own
888 //
889 if (FreeEntry == StartingVa)
890 {
891 //
892 // Well, now we are a free entry. At worst we just have our newly freed
893 // pages, at best we have our pages plus whatever entry came after us
894 //
895 FreeEntry->Size = FreePages;
896
897 //
898 // Find the appropriate list we should be on
899 //
900 i = FreeEntry->Size - 1;
901 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
902
903 //
904 // And insert us
905 //
906 InsertTailList (&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
907 }
908
909 //
910 // Just a sanity check
911 //
912 ASSERT(FreePages != 0);
913
914 //
915 // Walk all the pages from the start of our allocation to the end of the
916 // free run. These will all now become free page chunks.
917 //
918 NextEntry = StartingVa;
919 LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
920 do
921 {
922 //
923 // Link back to the parent free entry, and keep going
924 //
925 NextEntry->Owner = FreeEntry;
926 NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
927 } while (NextEntry != LastEntry);
928
929 //
930 // We're done, release the lock and let the caller know how much we freed
931 //
932 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
933 return NumberOfPages;
934 }
935
936
937 BOOLEAN
938 NTAPI
939 MiRaisePoolQuota(IN POOL_TYPE PoolType,
940 IN ULONG CurrentMaxQuota,
941 OUT PULONG NewMaxQuota)
942 {
943 //
944 // Not implemented
945 //
946 UNIMPLEMENTED;
947 *NewMaxQuota = CurrentMaxQuota + 65536;
948 return TRUE;
949 }
950
951 /* PUBLIC FUNCTIONS ***********************************************************/
952
953 /*
954 * @unimplemented
955 */
956 PVOID
957 NTAPI
958 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
959 IN ULONG PoolTag)
960 {
961 UNIMPLEMENTED;
962 return NULL;
963 }
964
965 /*
966 * @unimplemented
967 */
968 VOID
969 NTAPI
970 MmFreeMappingAddress(IN PVOID BaseAddress,
971 IN ULONG PoolTag)
972 {
973 UNIMPLEMENTED;
974 }
975
976 /* EOF */