/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::POOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
KGUARDED_MUTEX MmPagedPoolMutex;
MM_PAGED_POOL_INFO MmPagedPoolInfo;
SIZE_T MmAllocatedNonPagedPool;
ULONG MmSpecialPoolTag;
ULONG MmConsumedPoolPercentage;
BOOLEAN MmProtectFreedNonPagedPool;

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiInitializeNonPagedPoolThresholds(VOID)
{
    PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;

    /* Default low threshold of 8MB or one third of nonpaged pool */
    MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
    MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);

    /* Default high threshold of 20MB or 50% */
    MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
    MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
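    /* For example, with 12 MB of nonpaged pool the low threshold works out to
       4 MB (one third) and the high threshold to 6 MB (half), which keeps the
       assertion below true */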
    ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
}

VOID
NTAPI
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePoolInPages;

    /* Lock paged pool */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* The total size of the paged pool minus the allocated size is free */
    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* Check the initial high state */
    if (FreePoolInPages >= MiHighPagedPoolThreshold)
    {
        /* We have plenty of pool */
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't */
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Check the initial low state */
    if (FreePoolInPages <= MiLowPagedPoolThreshold)
    {
        /* We're very low on free pool memory */
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We're not */
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Release the paged pool lock */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Now it's time for the nonpaged pool lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages are the maximum minus what's been allocated */
    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Check if we have plenty */
    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear the event */
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Check if we have very little */
    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear it */
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* We're done, release the nonpaged pool lock */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}

VOID
NTAPI
MiInitializeNonPagedPool(VOID)
{
    ULONG i;
    PFN_NUMBER PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }
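
    //
    // (Derived from the allocation and free paths below: list i holds free runs
    // of exactly i + 1 pages, except the last list, which collects every run of
    // MI_MAX_FREE_PAGE_LISTS or more pages. Both paths pick a list by using
    // min(run size, MI_MAX_FREE_PAGE_LISTS) - 1 as the index.)
    //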

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
    }
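
    //
    // (Every page of a free run carries its own MMFREE_POOL_ENTRY, and each
    // one's Owner field points back at the descriptor in the run's first page.
    // MiFreePoolPages relies on this to find the head of a neighboring run
    // when it coalesces freed pages with it.)
    //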

    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge =
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}

PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER SizeInPages, PageFrameNumber;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    MMPTE TempPte;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = BYTES_TO_PAGES(SizeInBytes);

    //
    // Handle paged pool
    //
    if (PoolType == PagedPool)
    {
        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Work out how many page tables (PDEs) the request needs
            //
            i = ((SizeInPages - 1) / 1024) + 1;
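
            //
            // (1024 here is the number of PTEs in one page table on x86 without
            // PAE: PAGE_SIZE / sizeof(MMPTE) = 4096 / 4. For example, a request
            // for 2500 pages needs ((2500 - 1) / 1024) + 1 = 3 page tables.)
            //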

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
                              MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
                              1;
                ASSERT(SizeInPages < i);
                i = SizeInPages;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                SizeInPages = i;
            }

            //
            // Get the template PTE we'll use to expand
            //
            TempPte = ValidKernelPte;

            //
            // Get the first PTE in expansion space
            //
            PointerPte = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPteToAddress(PointerPte);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPte->u.Hard.Valid == 0);

                //
                // Request a paged pool page and write the PFN for it
                //
                PageFrameNumber = MmAllocPage(MC_PPOOL);
                TempPte.u.Hard.PageFrameNumber = PageFrameNumber;

                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[(ULONG_PTR)PointerPte & (PAGE_SIZE - 1) /
                                 sizeof(MMPTE)] = TempPte;
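
                //
                // (PointerPte actually walks page directory entries here, via
                // NextPdeForPagedPoolExpansion; the masked index is meant to be
                // this PDE's slot within the page, keeping the software copy of
                // the system page directory, which process page directories are
                // presumably built from, in sync with the live entry.)
                //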

                //
                // Write the actual PTE now
                //
                *PointerPte++ = TempPte;

                //
                // Move on to the next expansion address
                //
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
            } while (--i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                          MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                         1024,
                         SizeInPages * 1024);
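
            //
            // (Each newly mapped page table provides 1024 paged pool pages, so
            // both the starting bit and the bit count above are page-table
            // counts multiplied by 1024. Note that at this point SizeInPages
            // holds the number of page tables being added, not pages.)
            //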

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            SizeInPages = BYTES_TO_PAGES(SizeInBytes);
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i + SizeInPages - 1);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
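
        //
        // (Bit i of the allocation bitmap corresponds to the i-th page of paged
        // pool, so the base address is simply that many pages past
        // MmPagedPoolStart.)
        //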

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        //
        // Setup a demand-zero writable PTE
        //
        TempPte.u.Long = 0;
        MI_MAKE_WRITE_PAGE(&TempPte);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            *PointerPte++ = TempPte;
        } while (PointerPte < StartPte);
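
        //
        // (No physical pages are allocated at this point: these are demand-zero
        // PTEs, so each page of the allocation is presumably faulted in and
        // zero-filled the first time it is touched.)
        //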

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));
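
                //
                // (The allocation is carved from the tail of the run, so the
                // descriptor at the head of the run and the Owner links of the
                // remaining free pages remain valid.)
                //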

                //
                // This is not a free page segment anymore
                //
                RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    //
                    // Insert it back into a different list, based on its pages
                    //
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
                    InsertTailList(&MmNonPagedPoolFreeListHead[i],
                                   &FreeEntry->List);
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry and index
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;
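
                //
                // (Whole-page pool allocations have no size header; these
                // StartOfAllocation/EndOfAllocation bits in the PFN database
                // are what MiFreePoolPages later uses to rediscover how many
                // pages the allocation spans.)
                //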

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
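
    //
    // (The nonpaged pool queued spinlock above already raised IRQL to
    // DISPATCH_LEVEL, so the PFN lock can be taken with the AtDpcLevel
    // variant, which skips a second raise.)
    //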

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        //
        // Allocate a page
        //
        PageFrameNumber = MmAllocPage(MC_NPPOOL);

        //
        // Get the PFN entry for it
        //
        Pfn1 = MiGetPfnEntry(PageFrameNumber);

        //
        // Write the PTE for it
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        ASSERT(PointerPte->u.Hard.Valid == 0);
        ASSERT(TempPte.u.Hard.Valid == 1);
        *PointerPte++ = TempPte;
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}

ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_NUMBER FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans
        //
        NumberOfPages = End - i + 1;
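
        //
        // (For example, a 3-page allocation starting at page index 10 had bit
        // 12 set by MiAllocatePoolPages, so the scan stops with End = 12 and
        // NumberOfPages = 12 - 10 + 1 = 3.)
        //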

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the allocation and free bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }

    //
    // Get the first PTE and its corresponding PFN entry
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = PointerPte - StartPte + 1;

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PTEs as not part of an allocation anymore
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, our entire allocation must've fit within the initial
        // nonpaged pool, or the expansion nonpaged pool, so get the PFN entry
        // of the next allocation
        //
        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check whether the page right after ours starts another allocation
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // It doesn't, so that page must be free pool; locate its free entry
        // descriptor
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Owner == FreeEntry);
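
        //
        // (Since the page before it, our last page, was allocated, a free page
        // here can only be the first page of its free run, which is why its
        // descriptor must own itself.)
        //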

        //
        // Consume this entry's pages, and remove it from its free list
        //
        FreePages += FreeEntry->Size;
        RemoveEntryList(&FreeEntry->List);
    }

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation
    // and then check if this page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        FreeEntry = FreeEntry->Owner;
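
        //
        // (The page just before our allocation may be anywhere inside that free
        // run, so its Owner link is followed to the descriptor at the head of
        // the run; that is the run our freed pages are merged into.)
        //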

        //
        // Check if the entry is small enough to be indexed on a free list.
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
        {
            //
            // Remove the list from where it is now
            //
            RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = (ULONG)(FreeEntry->Size - 1);
            if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

            //
            // Do it
            //
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }

    //
    // Check if we were unable to do any compaction, and we'll stick with this
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages, at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = FreeEntry->Size - 1;
        if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

        //
        // And insert us
        //
        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Get all the pages between our allocation and its end. These will all now
    // become free page chunks.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}


BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    UNIMPLEMENTED;
}

/* EOF */