/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::POOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
KGUARDED_MUTEX MmPagedPoolMutex;
MM_PAGED_POOL_INFO MmPagedPoolInfo;
SIZE_T MmAllocatedNonPagedPool;
ULONG MmSpecialPoolTag;
ULONG MmConsumedPoolPercentage;
BOOLEAN MmProtectFreedNonPagedPool;

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiInitializeNonPagedPoolThresholds(VOID)
{
    PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;

    /* Default low threshold of 8MB or one third of nonpaged pool */
    MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
    MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);

    /* Default high threshold of 20MB or 50% */
    MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
    MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
    ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
}

VOID
NTAPI
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePoolInPages;

    /* Lock paged pool */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* The total size of the paged pool minus the allocated size is free */
    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* Check the initial high state */
    if (FreePoolInPages >= MiHighPagedPoolThreshold)
    {
        /* We have plenty of pool */
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't */
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Check the initial low state */
    if (FreePoolInPages <= MiLowPagedPoolThreshold)
    {
        /* We're very low in free pool memory */
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We're not */
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Release the paged pool lock */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Now it's time for the nonpaged pool lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages are the maximum minus what's been allocated */
    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Check if we have plenty */
    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear the event */
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Check if we have very little */
    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear it */
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* We're done, release the nonpaged pool lock */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}

VOID
NTAPI
MiInitializeNonPagedPool(VOID)
{
    ULONG i;
    PFN_NUMBER PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
    }
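
    //
    // N.B. Every page of the run gets an Owner back-pointer here, but only the
    // owning (first) entry's Size and List fields are actually used
    //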

    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge =
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}

PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER SizeInPages, PageFrameNumber;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    MMPTE TempPte;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = BYTES_TO_PAGES(SizeInBytes);

    //
    // Handle paged pool
    //
    if (PoolType == PagedPool)
    {
        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Calculate how many page-table pages (1024 PTEs each) we need to
            // cover this request
            //
            i = ((SizeInPages - 1) / 1024) + 1;

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
                              MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
                              1;
                ASSERT(SizeInPages < i);
                i = SizeInPages;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                SizeInPages = i;
            }

            //
            // Get the template PTE we'll use to expand
            //
            TempPte = ValidKernelPte;

            //
            // Get the first PTE in expansion space
            //
            PointerPte = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPteToAddress(PointerPte);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPte->u.Hard.Valid == 0);

                //
                // Request a paged pool page and write the PFN for it
                //
                PageFrameNumber = MmAllocPage(MC_PPOOL);
                TempPte.u.Hard.PageFrameNumber = PageFrameNumber;

                //
                // Save it into our double-buffered system page directory
                //
                /* This seems to be making the assumption that one PDE is one page long */
                ASSERT(PAGE_SIZE == (PD_COUNT * (sizeof(MMPTE) * PDE_COUNT)));
                MmSystemPagePtes[(ULONG_PTR)PointerPte & (PAGE_SIZE - 1) /
                                 sizeof(MMPTE)] = TempPte;

                /* Write the actual PTE now */
                ASSERT(TempPte.u.Hard.Valid == 1);
                *PointerPte++ = TempPte;

                //
                // Move on to the next expansion address
                //
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
            } while (--i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                          MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                         1024,
                         SizeInPages * 1024);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);

            //
            // Now try consuming the pages again
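            // (note that SizeInPages was reused above as the number of page
            // tables we expanded by, so it is recomputed from the request here)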
            //
            SizeInPages = BYTES_TO_PAGES(SizeInBytes);
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i + SizeInPages - 1);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        DPRINT1("Setting up demand zero\n");
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
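        // (despite its name, StartPte serves here as the one-past-the-end PTE
        //  that terminates the loop below)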
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            ASSERT(PointerPte->u.Hard.Valid == 0);
            *PointerPte++ = TempPte;
        } while (PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
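    // (LastHead points one past the final list head and only serves as the
    //  loop terminator below)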

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
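                // (the pages are carved from the tail of the run, so the
                //  entry's own start address stays intact)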
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                //
                // This is not a free page segment anymore
                //
                RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    //
                    // Insert it back into a different list, based on its pages
                    //
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
                    InsertTailList(&MmNonPagedPoolFreeListHead[i],
                                   &FreeEntry->List);
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for its first page
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        PageFrameNumber = MiRemoveAnyPage(0);

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        ASSERT(PointerPte->u.Hard.Valid == 0);
        ASSERT(TempPte.u.Hard.Valid == 1);
        *PointerPte++ = TempPte;
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}

ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_NUMBER FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans
        //
        NumberOfPages = End - i + 1;

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the end-of-allocation bit and the allocation bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }

    //
    // Get the first PTE and its corresponding PFN entry
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = PointerPte - StartPte + 1;

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PTEs as not part of an allocation anymore
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, our entire allocation must've fit within the initial
        // nonpaged pool, or the expansion nonpaged pool, so get the PFN entry
        // of the next allocation
        //
        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }

    }

    //
    // Check if the page after our allocation starts another allocation
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // It doesn't, so it must be free; locate its free entry descriptor
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Owner == FreeEntry);

        //
        // Consume this entry's pages, and remove it from its free list
        //
        FreePages += FreeEntry->Size;
        RemoveEntryList(&FreeEntry->List);
    }

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;
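        // (PointerPte was left one page past the end of our allocation, so this
        //  lands on the page immediately before its start)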
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation
    // and then check if this page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        FreeEntry = FreeEntry->Owner;
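        // (the page right before us may sit in the middle of a free run, so
        //  follow the Owner pointer back to the head of that run)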

        //
        // Check if the entry is small enough to be indexed on a free list
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
        {
            //
            // Remove the list from where it is now
            //
            RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = (ULONG)(FreeEntry->Size - 1);
            if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

            //
            // Do it
            //
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }

    //
    // Check if no merge with a preceding free entry took place, in which case
    // our own first page becomes the free entry
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages, at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = FreeEntry->Size - 1;
        if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

        //
        // And insert us
        //
        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Get all the pages between our allocation and its end. These will all now
    // become free page chunks.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}


BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
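    // (stub: unconditionally grant the caller 65536 units of additional quota)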
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    UNIMPLEMENTED;
}

/* EOF */