1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/pool.c
5 * PURPOSE: ARM Memory Manager Pool Allocator
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::POOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
22 PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
23 PVOID MmNonPagedPoolEnd0;
24 PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
25 KGUARDED_MUTEX MmPagedPoolMutex;
26 MM_PAGED_POOL_INFO MmPagedPoolInfo;
27 SIZE_T MmAllocatedNonPagedPool;
28 ULONG MmSpecialPoolTag;
29 ULONG MmConsumedPoolPercentage;
30 BOOLEAN MmProtectFreedNonPagedPool;
31
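//
// Free nonpaged pool is tracked in-band: the first page of every free run
// holds an MMFREE_POOL_ENTRY (see miarm.h) whose List field links the run
// into one of the MI_MAX_FREE_PAGE_LISTS buckets above, whose Size field
// counts the pages in the run, and whose Owner field points back to the
// entry at the start of the run. Every page of a run carries the same
// Owner pointer, as MiInitializeNonPagedPool and MiFreePoolPages maintain.
//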
32 /* PRIVATE FUNCTIONS **********************************************************/
33
34 VOID
35 NTAPI
36 MiInitializeNonPagedPoolThresholds(VOID)
37 {
38 PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;
39
40 /* Default low threshold of 8MB or one third of nonpaged pool */
41 MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
42 MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);
43
44 /* Default high threshold of 20MB or 50% */
45 MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
46 MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
47 ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
48 }
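//
// For example, with a 96 MB maximum nonpaged pool and 4 KB pages (24576
// pages), the low threshold becomes min(8 MB, 96 MB / 3) = 8 MB (2048
// pages) and the high threshold becomes min(20 MB, 96 MB / 2) = 20 MB
// (5120 pages).
//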
49
50 VOID
51 NTAPI
52 MiInitializePoolEvents(VOID)
53 {
54 KIRQL OldIrql;
55 PFN_NUMBER FreePoolInPages;
56
57 /* Lock paged pool */
58 KeAcquireGuardedMutex(&MmPagedPoolMutex);
59
60     /* Total size of the paged pool minus the allocated size is free */
61 FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
62
63     /* Check the initial high state */
64 if (FreePoolInPages >= MiHighPagedPoolThreshold)
65 {
66 /* We have plenty of pool */
67 KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
68 }
69 else
70 {
71 /* We don't */
72 KeClearEvent(MiHighPagedPoolEvent);
73 }
74
75 /* Check the initial low state */
76 if (FreePoolInPages <= MiLowPagedPoolThreshold)
77 {
78 /* We're very low in free pool memory */
79 KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
80 }
81 else
82 {
83 /* We're not */
84 KeClearEvent(MiLowPagedPoolEvent);
85 }
86
87 /* Release the paged pool lock */
88 KeReleaseGuardedMutex(&MmPagedPoolMutex);
89
90 /* Now it's time for the nonpaged pool lock */
91 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
92
93 /* Free pages are the maximum minus what's been allocated */
94 FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
95
96 /* Check if we have plenty */
97 if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
98 {
99 /* We do, set the event */
100 KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
101 }
102 else
103 {
104 /* We don't, clear the event */
105 KeClearEvent(MiHighNonPagedPoolEvent);
106 }
107
108 /* Check if we have very little */
109 if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
110 {
111 /* We do, set the event */
112 KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
113 }
114 else
115 {
116 /* We don't, clear it */
117 KeClearEvent(MiLowNonPagedPoolEvent);
118 }
119
120 /* We're done, release the nonpaged pool lock */
121 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
122 }
123
124 VOID
125 NTAPI
126 MiInitializeNonPagedPool(VOID)
127 {
128 ULONG i;
129 PFN_NUMBER PoolPages;
130 PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
131 PMMPTE PointerPte;
132 PAGED_CODE();
133
134 //
135 // We keep 4 lists of free pages (4 lists help avoid contention)
136 //
137 for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
138 {
139 //
140 // Initialize each of them
141 //
142 InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
143 }
144
145 //
146 // Calculate how many pages the initial nonpaged pool has
147 //
148 PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
149 MmNumberOfFreeNonPagedPool = PoolPages;
150
151 //
152 // Initialize the first free entry
153 //
154 FreeEntry = MmNonPagedPoolStart;
155 FirstEntry = FreeEntry;
156 FreeEntry->Size = PoolPages;
157 FreeEntry->Owner = FirstEntry;
158
159 //
160 // Insert it into the last list
161 //
162 InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
163 &FreeEntry->List);
164
165 //
166     // Link every remaining page back to the first entry through its Owner field
167 //
168 while (PoolPages-- > 1)
169 {
170 //
171 // Link them all back to the original entry
172 //
173 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
174 FreeEntry->Owner = FirstEntry;
175 }
176
177 //
178 // Validate and remember first allocated pool page
179 //
180 PointerPte = MiAddressToPte(MmNonPagedPoolStart);
181 ASSERT(PointerPte->u.Hard.Valid == 1);
182 MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
183
184 //
185 // Keep track of where initial nonpaged pool ends
186 //
187 MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
188 MmSizeOfNonPagedPoolInBytes);
189
190 //
191 // Validate and remember last allocated pool page
192 //
193 PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
194 ASSERT(PointerPte->u.Hard.Valid == 1);
195 MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
196
197 //
198 // Validate the first nonpaged pool expansion page (which is a guard page)
199 //
200 PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
201 ASSERT(PointerPte->u.Hard.Valid == 0);
202
203 //
204 // Calculate the size of the expansion region alone
205 //
206 MiExpansionPoolPagesInitialCharge =
207 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);
208
209 //
210 // Remove 2 pages, since there's a guard page on top and on the bottom
211 //
212 MiExpansionPoolPagesInitialCharge -= 2;
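//
// For example, with a 128 MB maximum and a 16 MB initial nonpaged pool on
// 4 KB pages, the expansion region is 112 MB = 28672 pages, charged as
// 28670 once the two guard pages are excluded.
//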
213
214 //
215 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
216 // guard page on top so make sure to skip it. The bottom guard page will be
217 // guaranteed by the fact our size is off by one.
218 //
219 MiInitializeSystemPtes(PointerPte + 1,
220 MiExpansionPoolPagesInitialCharge,
221 NonPagedPoolExpansion);
222 }
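//
// At this point the whole initial nonpaged pool is described by a single
// free entry in its first page: it spans PoolPages pages, sits on the
// largest free list, and every page in the range points back to it via its
// Owner field. The expansion range only has its system PTE space prepared
// (minus the two guard pages); physical pages are committed later, when
// MiAllocatePoolPages actually has to grow the pool.
//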
223
224 PVOID
225 NTAPI
226 MiAllocatePoolPages(IN POOL_TYPE PoolType,
227 IN SIZE_T SizeInBytes)
228 {
229 PFN_NUMBER SizeInPages, PageFrameNumber;
230 ULONG i;
231 KIRQL OldIrql;
232 PLIST_ENTRY NextEntry, NextHead, LastHead;
233 PMMPTE PointerPte, StartPte;
234 MMPTE TempPte;
235 PMMPFN Pfn1;
236 PVOID BaseVa, BaseVaStart;
237 PMMFREE_POOL_ENTRY FreeEntry;
238 PKSPIN_LOCK_QUEUE LockQueue;
239
240 //
241 // Figure out how big the allocation is in pages
242 //
243 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
244
245 //
246 // Handle paged pool
247 //
248 if (PoolType == PagedPool)
249 {
250 //
251 // Lock the paged pool mutex
252 //
253 KeAcquireGuardedMutex(&MmPagedPoolMutex);
254
255 //
256 // Find some empty allocation space
257 //
258 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
259 SizeInPages,
260 MmPagedPoolInfo.PagedPoolHint);
261 if (i == 0xFFFFFFFF)
262 {
263 //
264             // Figure out how many page-table pages (PDEs) we need to expand by
265 //
266 i = ((SizeInPages - 1) / 1024) + 1;
267
268 //
269             // Check if there is enough paged pool expansion space left
270 //
271 if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
272 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
273 {
274 //
275 // Out of memory!
276 //
277 DPRINT1("OUT OF PAGED POOL!!!\n");
278 KeReleaseGuardedMutex(&MmPagedPoolMutex);
279 return NULL;
280 }
281
282 //
283 // Check if we'll have to expand past the last PTE we have available
284 //
285 if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
286 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
287 {
288 //
289 // We can only support this much then
290 //
291 SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
292 MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
293 1;
294 ASSERT(SizeInPages < i);
295 i = SizeInPages;
296 }
297 else
298 {
299 //
300 // Otherwise, there is plenty of space left for this expansion
301 //
302 SizeInPages = i;
303 }
304
305 //
306 // Get the template PTE we'll use to expand
307 //
308 TempPte = ValidKernelPte;
309
310 //
311 // Get the first PTE in expansion space
312 //
313 PointerPte = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
314 BaseVa = MiPteToAddress(PointerPte);
315 BaseVaStart = BaseVa;
316
317 //
318 // Lock the PFN database and loop pages
319 //
320 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
321 do
322 {
323 //
324 // It should not already be valid
325 //
326 ASSERT(PointerPte->u.Hard.Valid == 0);
327
328 //
329 // Request a paged pool page and write the PFN for it
330 //
331 PageFrameNumber = MmAllocPage(MC_PPOOL);
332 TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
333
334 //
335 // Save it into our double-buffered system page directory
336 //
337 /* This seems to be making the assumption that one PDE is one page long */
338 ASSERT(PAGE_SIZE == (PD_COUNT * (sizeof(MMPTE) * PDE_COUNT)));
339                 MmSystemPagePtes[((ULONG_PTR)PointerPte & (PAGE_SIZE - 1)) /
340                                  sizeof(MMPTE)] = TempPte;
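//
// The index above is the PDE's byte offset within the page directory page
// divided by the size of one entry, so the MmSystemPagePtes mirror lines
// up one-to-one with the real PDEs being written just below (valid on x86,
// where the page directory is a single page, as the ASSERT above checks).
//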
341
342 /* Write the actual PTE now */
343 ASSERT(TempPte.u.Hard.Valid == 1);
344 *PointerPte++ = TempPte;
345
346 //
347 // Move on to the next expansion address
348 //
349 BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
350 } while (--i > 0);
351
352 //
353 // Release the PFN database lock
354 //
355 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
356
357 //
358             // These pages are now available, clear their availability bits
359 //
360 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
361 (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
362 MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
363 1024,
364 SizeInPages * 1024);
365
366 //
367 // Update the next expansion location
368 //
369 MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;
370
371 //
372 // Zero out the newly available memory
373 //
374 RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);
375
376 //
377 // Now try consuming the pages again
378 //
379 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
380 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
381 SizeInPages,
382 0);
383 if (i == 0xFFFFFFFF)
384 {
385 //
386 // Out of memory!
387 //
388 DPRINT1("OUT OF PAGED POOL!!!\n");
389 KeReleaseGuardedMutex(&MmPagedPoolMutex);
390 return NULL;
391 }
392 }
393
394 //
395 // Update the pool hint if the request was just one page
396 //
397 if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
398
399 //
400 // Update the end bitmap so we know the bounds of this allocation when
401 // the time comes to free it
402 //
403 RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i + SizeInPages - 1);
404
405 //
406 // Now we can release the lock (it mainly protects the bitmap)
407 //
408 KeReleaseGuardedMutex(&MmPagedPoolMutex);
409
410 //
411 // Now figure out where this allocation starts
412 //
413 BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
414
415 //
416 // Flush the TLB
417 //
418 KeFlushEntireTb(TRUE, TRUE);
419
420 //
421 // Setup a demand-zero writable PTE
422 //
423 TempPte.u.Long = 0;
424 MI_MAKE_WRITE_PAGE(&TempPte);
425
426 //
427 // Find the first and last PTE, then loop them all
428 //
429 PointerPte = MiAddressToPte(BaseVa);
430 StartPte = PointerPte + SizeInPages;
431 do
432 {
433 //
434 // Write the demand zero PTE and keep going
435 //
436 ASSERT(PointerPte->u.Hard.Valid == 0);
437 *PointerPte++ = TempPte;
438 } while (PointerPte < StartPte);
439
440 //
441 // Return the allocation address to the caller
442 //
443 return BaseVa;
444 }
445
446 //
447 // Allocations of less than 4 pages go into their individual buckets
448 //
449 i = SizeInPages - 1;
450 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
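//
// For example, a 2-page request starts searching at list index 1 and, if
// that bucket is empty, falls through to the larger buckets below.
//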
451
452 //
453 // Loop through all the free page lists based on the page index
454 //
455 NextHead = &MmNonPagedPoolFreeListHead[i];
456 LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
457
458 //
459 // Acquire the nonpaged pool lock
460 //
461 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
462 do
463 {
464 //
465 // Now loop through all the free page entries in this given list
466 //
467 NextEntry = NextHead->Flink;
468 while (NextEntry != NextHead)
469 {
470 //
471 // Grab the entry and see if it can handle our allocation
472 //
473 FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
474 if (FreeEntry->Size >= SizeInPages)
475 {
476 //
477 // It does, so consume the pages from here
478 //
479 FreeEntry->Size -= SizeInPages;
480
481 //
482 // The allocation will begin in this free page area
483 //
484 BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
485 (FreeEntry->Size << PAGE_SHIFT));
486
487 //
488 // This is not a free page segment anymore
489 //
490 RemoveEntryList(&FreeEntry->List);
491
492 //
493                 // However, check if it still has space left
494 //
495 if (FreeEntry->Size != 0)
496 {
497 //
498 // Insert it back into a different list, based on its pages
499 //
500 i = FreeEntry->Size - 1;
501 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
502 InsertTailList (&MmNonPagedPoolFreeListHead[i],
503 &FreeEntry->List);
504 }
505
506 //
507 // Grab the PTE for this allocation
508 //
509 PointerPte = MiAddressToPte(BaseVa);
510 ASSERT(PointerPte->u.Hard.Valid == 1);
511
512 //
513                 // Grab the PFN entry for the first allocated page
514 //
515 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
516
517 //
518 // Now mark it as the beginning of an allocation
519 //
520 ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
521 Pfn1->u3.e1.StartOfAllocation = 1;
522
523 //
524 // Check if the allocation is larger than one page
525 //
526 if (SizeInPages != 1)
527 {
528 //
529 // Navigate to the last PFN entry and PTE
530 //
531 PointerPte += SizeInPages - 1;
532 ASSERT(PointerPte->u.Hard.Valid == 1);
533 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
534 }
535
536 //
537 // Mark this PFN as the last (might be the same as the first)
538 //
539 ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
540 Pfn1->u3.e1.EndOfAllocation = 1;
541
542 //
543 // Release the nonpaged pool lock, and return the allocation
544 //
545 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
546 return BaseVa;
547 }
548
549 //
550 // Try the next free page entry
551 //
552 NextEntry = FreeEntry->List.Flink;
553 }
554 } while (++NextHead < LastHead);
555
556 //
557 // If we got here, we're out of space.
558 // Start by releasing the lock
559 //
560 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
561
562 //
563 // Allocate some system PTEs
564 //
565 StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
566 PointerPte = StartPte;
567 if (StartPte == NULL)
568 {
569 //
570 // Ran out of memory
571 //
572 DPRINT1("Out of NP Expansion Pool\n");
573 return NULL;
574 }
575
576 //
577 // Acquire the pool lock now
578 //
579 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
580
581 //
582 // Lock the PFN database too
583 //
584 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
585 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
586
587 //
588 // Loop the pages
589 //
590 TempPte = ValidKernelPte;
591 do
592 {
593 //
594 // Allocate a page
595 //
596 PageFrameNumber = MmAllocPage(MC_NPPOOL);
597
598 /* Get the PFN entry for it and fill it out */
599 Pfn1 = MiGetPfnEntry(PageFrameNumber);
600 Pfn1->u3.e2.ReferenceCount = 1;
601 Pfn1->u2.ShareCount = 1;
602 Pfn1->PteAddress = PointerPte;
603 Pfn1->u3.e1.PageLocation = ActiveAndValid;
604 Pfn1->u4.VerifierAllocation = 0;
605
606 /* Write the PTE for it */
607 TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
608 ASSERT(PointerPte->u.Hard.Valid == 0);
609 ASSERT(TempPte.u.Hard.Valid == 1);
610 *PointerPte++ = TempPte;
611 } while (--SizeInPages > 0);
612
613 //
614 // This is the last page
615 //
616 Pfn1->u3.e1.EndOfAllocation = 1;
617
618 //
619 // Get the first page and mark it as such
620 //
621 Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
622 Pfn1->u3.e1.StartOfAllocation = 1;
623
624 //
625 // Release the PFN and nonpaged pool lock
626 //
627 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
628 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
629
630 //
631 // Return the address
632 //
633 return MiPteToAddress(StartPte);
634 }
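//
// MiAllocatePoolPages only hands out whole pages; the executive pool
// allocator is expected to carve them into the smaller tagged blocks that
// ExAllocatePoolWithTag returns, and MiFreePoolPages below undoes the work
// page for page.
//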
635
636 ULONG
637 NTAPI
638 MiFreePoolPages(IN PVOID StartingVa)
639 {
640 PMMPTE PointerPte, StartPte;
641 PMMPFN Pfn1, StartPfn;
642 PFN_NUMBER FreePages, NumberOfPages;
643 KIRQL OldIrql;
644 PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
645 ULONG i, End;
646
647 //
648 // Handle paged pool
649 //
650 if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
651 {
652 //
653 // Calculate the offset from the beginning of paged pool, and convert it
654 // into pages
655 //
656 i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
657 End = i;
658
659 //
660 // Now use the end bitmap to scan until we find a set bit, meaning that
661 // this allocation finishes here
662 //
663 while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
664
665 //
666 // Now calculate the total number of pages this allocation spans
667 //
668 NumberOfPages = End - i + 1;
669
670 //
671 // Acquire the paged pool lock
672 //
673 KeAcquireGuardedMutex(&MmPagedPoolMutex);
674
675 //
676 // Clear the allocation and free bits
677 //
678         RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
679 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
680
681 //
682 // Update the hint if we need to
683 //
684 if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
685
686 //
687 // Release the lock protecting the bitmaps
688 //
689 KeReleaseGuardedMutex(&MmPagedPoolMutex);
690
691 //
692 // And finally return the number of pages freed
693 //
694 return NumberOfPages;
695 }
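//
// Everything below handles nonpaged pool. Unlike paged pool, whose extent
// is recovered purely from the bitmaps above, a nonpaged free walks PTEs
// and PFN entries to find the allocation's bounds and to coalesce it with
// neighboring free runs.
//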
696
697 //
698 // Get the first PTE and its corresponding PFN entry
699 //
700 StartPte = PointerPte = MiAddressToPte(StartingVa);
701 StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
702
703 //
704 // Loop until we find the last PTE
705 //
706 while (Pfn1->u3.e1.EndOfAllocation == 0)
707 {
708 //
709 // Keep going
710 //
711 PointerPte++;
712 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
713 }
714
715 //
716 // Now we know how many pages we have
717 //
718 NumberOfPages = PointerPte - StartPte + 1;
719
720 //
721 // Acquire the nonpaged pool lock
722 //
723 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
724
725 //
726     // Mark the first and last PFN entries as no longer part of an allocation
727 //
728 StartPfn->u3.e1.StartOfAllocation = 0;
729 Pfn1->u3.e1.EndOfAllocation = 0;
730
731 //
732 // Assume we will free as many pages as the allocation was
733 //
734 FreePages = NumberOfPages;
735
736 //
737 // Peek one page past the end of the allocation
738 //
739 PointerPte++;
740
741 //
742 // Guard against going past initial nonpaged pool
743 //
744 if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
745 {
746 //
747 // This page is on the outskirts of initial nonpaged pool, so ignore it
748 //
749 Pfn1 = NULL;
750 }
751 else
752 {
753 //
754 // Otherwise, our entire allocation must've fit within the initial non
755 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
756 // the next allocation
757 //
758 ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
759 if (PointerPte->u.Hard.Valid == 1)
760 {
761 //
762 // It's either expansion or initial: get the PFN entry
763 //
764 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
765 }
766 else
767 {
768 //
769 // This means we've reached the guard page that protects the end of
770 // the expansion nonpaged pool
771 //
772 Pfn1 = NULL;
773 }
774
775 }
776
777 //
778     // Check if the page right after our allocation starts another allocation
779 //
780 if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
781 {
782 //
783 // It doesn't, so we should actually locate a free entry descriptor
784 //
785 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
786 (NumberOfPages << PAGE_SHIFT));
787 ASSERT(FreeEntry->Owner == FreeEntry);
788
789 //
790 // Consume this entry's pages, and remove it from its free list
791 //
792 FreePages += FreeEntry->Size;
793 RemoveEntryList (&FreeEntry->List);
794 }
795
796 //
797 // Now get the official free entry we'll create for the caller's allocation
798 //
799 FreeEntry = StartingVa;
800
801 //
802     // Check if our allocation starts at the very first pool page
803 //
804 if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
805 {
806 //
807 // Then we can't do anything or we'll risk underflowing
808 //
809 Pfn1 = NULL;
810 }
811 else
812 {
813 //
814 // Otherwise, get the PTE for the page right before our allocation
815 //
816 PointerPte -= NumberOfPages + 1;
817 if (PointerPte->u.Hard.Valid == 1)
818 {
819 //
820 // It's either expansion or initial nonpaged pool, get the PFN entry
821 //
822 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
823 }
824 else
825 {
826 //
827 // We must've reached the guard page, so don't risk touching it
828 //
829 Pfn1 = NULL;
830 }
831 }
832
833 //
834 // Check if there is a valid PFN entry for the page before the allocation
835 // and then check if this page was actually the end of an allocation.
836 // If it wasn't, then we know for sure it's a free page
837 //
838 if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
839 {
840 //
841 // Get the free entry descriptor for that given page range
842 //
843 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
844 FreeEntry = FreeEntry->Owner;
845
846 //
847 // Check if the entry is small enough to be indexed on a free list
848 // If it is, we'll want to re-insert it, since we're about to
849 // collapse our pages on top of it, which will change its count
850 //
851 if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
852 {
853 //
854             // Remove the entry from the list it is currently on
855 //
856 RemoveEntryList(&FreeEntry->List);
857
858 //
859 // Update its size
860 //
861 FreeEntry->Size += FreePages;
862
863 //
864 // And now find the new appropriate list to place it in
865 //
866 i = (ULONG)(FreeEntry->Size - 1);
867 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
868
869 //
870 // Do it
871 //
872 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
873 }
874 else
875 {
876 //
877 // Otherwise, just combine our free pages into this entry
878 //
879 FreeEntry->Size += FreePages;
880 }
881 }
882
883 //
884     // Check if we could not merge into a preceding free run and are on our own
885 //
886 if (FreeEntry == StartingVa)
887 {
888 //
889         // Well, now we are a free entry. At worst we just have our newly freed
890 // pages, at best we have our pages plus whatever entry came after us
891 //
892 FreeEntry->Size = FreePages;
893
894 //
895 // Find the appropriate list we should be on
896 //
897 i = FreeEntry->Size - 1;
898 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
899
900 //
901 // And insert us
902 //
903 InsertTailList (&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
904 }
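//
// For example, freeing a 2-page allocation that sits between a 3-page free
// run and a 1-page free run works out like this: the following run is
// absorbed first (FreePages becomes 3), then the preceding run's entry
// grows from 3 to 6 pages, and finally every page of the absorbed range is
// re-pointed at that entry below. The caller is still told that 2 pages,
// its own allocation, were freed.
//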
905
906 //
907 // Just a sanity check
908 //
909 ASSERT(FreePages != 0);
910
911 //
912     // Walk every page that now belongs to this free run and point it back at
913     // the owning free entry.
914 //
915 NextEntry = StartingVa;
916 LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
917 do
918 {
919 //
920 // Link back to the parent free entry, and keep going
921 //
922 NextEntry->Owner = FreeEntry;
923 NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
924 } while (NextEntry != LastEntry);
925
926 //
927 // We're done, release the lock and let the caller know how much we freed
928 //
929 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
930 return NumberOfPages;
931 }
932
933
934 BOOLEAN
935 NTAPI
936 MiRaisePoolQuota(IN POOL_TYPE PoolType,
937 IN ULONG CurrentMaxQuota,
938 OUT PULONG NewMaxQuota)
939 {
940 //
941 // Not implemented
942 //
943 UNIMPLEMENTED;
944 *NewMaxQuota = CurrentMaxQuota + 65536;
945 return TRUE;
946 }
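//
// The 64 KB bump above is an arbitrary placeholder until pool quota
// raising is actually implemented.
//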
947
948 /* PUBLIC FUNCTIONS ***********************************************************/
949
950 /*
951 * @unimplemented
952 */
953 PVOID
954 NTAPI
955 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
956 IN ULONG PoolTag)
957 {
958 UNIMPLEMENTED;
959 return NULL;
960 }
961
962 /*
963 * @unimplemented
964 */
965 VOID
966 NTAPI
967 MmFreeMappingAddress(IN PVOID BaseAddress,
968 IN ULONG PoolTag)
969 {
970 UNIMPLEMENTED;
971 }
972
973 /* EOF */