/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::POOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
KGUARDED_MUTEX MmPagedPoolMutex;
MM_PAGED_POOL_INFO MmPagedPoolInfo;
SIZE_T MmAllocatedNonPagedPool;
ULONG MmSpecialPoolTag;

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiInitializeNonPagedPoolThresholds(VOID)
{
    PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;

    /* Default low threshold of 8MB or one third of nonpaged pool */
    MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
    MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);

    /* Default high threshold of 20MB or 50% */
    MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
    MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
    ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
}
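
//
// NOTE: These thresholds drive the low/high nonpaged pool events that
// MiInitializePoolEvents sets up below. As an example of the math above,
// a machine whose maximum nonpaged pool is 12MB gets a low threshold of
// min(8MB, 12MB / 3) = 4MB and a high threshold of min(20MB, 12MB / 2) = 6MB,
// both stored in pages.
//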

VOID
NTAPI
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePoolInPages;

    /* Lock paged pool */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* The total size of the paged pool minus the allocated size is free */
    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* Check the initial high state */
    if (FreePoolInPages >= MiHighPagedPoolThreshold)
    {
        /* We have plenty of pool */
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't */
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Check the initial low state */
    if (FreePoolInPages <= MiLowPagedPoolThreshold)
    {
        /* We're very low in free pool memory */
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We're not */
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Release the paged pool lock */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Now it's time for the nonpaged pool lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages are the maximum minus what's been allocated */
    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Check if we have plenty */
    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear the event */
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Check if we have very little */
    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear it */
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* We're done, release the nonpaged pool lock */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}

VOID
NTAPI
MiInitializeNonPagedPool(VOID)
{
    ULONG i;
    PFN_NUMBER PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
    }
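
    //
    // At this point every page of the initial free run points back, through
    // its Owner field, to the MMFREE_POOL_ENTRY at the start of the run.
    // MiFreePoolPages relies on this backlink to find the descriptor of a
    // neighboring free run when it coalesces freed pages.
    //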

    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge =
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}
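
//
// NOTE: After the initialization above, the expansion region that starts at
// MmNonPagedPoolExpansionStart consists of a leading guard page, then
// MiExpansionPoolPagesInitialCharge pages' worth of PTEs handed over to the
// system PTE allocator as NonPagedPoolExpansion, with one final page left
// unmapped to serve as the bottom guard page.
//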

PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER SizeInPages, PageFrameNumber;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    MMPTE TempPte;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = BYTES_TO_PAGES(SizeInBytes);

    //
    // Handle paged pool
    //
    if (PoolType == PagedPool)
    {
        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Figure out how many page tables we need to expand by
            //
            i = ((SizeInPages - 1) / 1024) + 1;
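
            //
            // This is a ceiling division: with the 1024-PTEs-per-page-table
            // layout this code assumes, i is now the number of page tables
            // needed to cover the request. For example, a 5000-page request
            // needs ((5000 - 1) / 1024) + 1 = 5 page tables.
            //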

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
                              MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
                              1;
                ASSERT(SizeInPages < i);
                i = SizeInPages;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                SizeInPages = i;
            }

            //
            // Get the template PTE we'll use to expand
            //
            TempPte = ValidKernelPte;

            //
            // Get the first PTE in expansion space
            //
            PointerPte = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPteToAddress(PointerPte);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPte->u.Hard.Valid == 0);

                //
                // Request a paged pool page and write the PFN for it
                //
                PageFrameNumber = MmAllocPage(MC_PPOOL);
                TempPte.u.Hard.PageFrameNumber = PageFrameNumber;

                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPte & (PAGE_SIZE - 1)) /
                                 sizeof(MMPTE)] = TempPte;
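
                //
                // The index expression picks the PDE's slot within its page
                // directory page. MmSystemPagePtes mirrors these kernel PDEs
                // so that other address spaces, which carry their own page
                // directory copies, can later pick up this mapping (typically
                // when a fault on a paged pool address is resolved).
                //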

                //
                // Write the actual PTE now
                //
                *PointerPte++ = TempPte;

                //
                // Move on to the next expansion address
                //
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
            } while (--i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                          MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                         1024,
                         SizeInPages * 1024);
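
            //
            // Each page table that was just created maps 1024 pages, so the
            // bitmap gains SizeInPages * 1024 usable bits, starting at the
            // bit index that corresponds to the first newly mapped PDE.
            //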

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            SizeInPages = BYTES_TO_PAGES(SizeInBytes);
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i + SizeInPages - 1);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        //
        // Setup a demand-zero writable PTE
        //
        TempPte.u.Long = 0;
        MI_MAKE_WRITE_PAGE(&TempPte);
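
        //
        // This is an invalid, demand-zero software PTE: no physical pages are
        // assigned to the allocation here, they are expected to be
        // materialized by the page fault handler on first access.
        //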

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            *PointerPte++ = TempPte;
        } while (PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
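
    //
    // In other words, an N-page request starts its search at list
    // min(N - 1, MI_MAX_FREE_PAGE_LISTS - 1): one-page requests at list 0,
    // and anything of MI_MAX_FREE_PAGE_LISTS pages or more at the last list,
    // which also holds every large free run.
    //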

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));
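
                //
                // Because the pages are carved from the tail of the free run,
                // the MMFREE_POOL_ENTRY header that sits in the run's first
                // page stays where it is; only its Size had to change.
                //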

                //
                // This is not a free page segment anymore
                //
                RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it still has space left
                //
                if (FreeEntry->Size != 0)
                {
                    //
                    // Insert it back into a different list, based on its pages
                    //
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
                    InsertTailList (&MmNonPagedPoolFreeListHead[i],
                                    &FreeEntry->List);
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for the first page of the allocation
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
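
    //
    // The AtDpcLevel variant is used because the queued spinlock acquired
    // just above already raised IRQL to DISPATCH_LEVEL, so only the lock
    // itself needs to be taken here.
    //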

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        //
        // Allocate a page
        //
        PageFrameNumber = MmAllocPage(MC_NPPOOL);

        //
        // Get the PFN entry for it
        //
        Pfn1 = MiGetPfnEntry(PageFrameNumber);

        //
        // Write the PTE for it
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        ASSERT(PointerPte->u.Hard.Valid == 0);
        ASSERT(TempPte.u.Hard.Valid == 1);
        *PointerPte++ = TempPte;
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}

ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_NUMBER FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans
        //
        NumberOfPages = End - i + 1;
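
        //
        // The size is recovered purely from the bitmaps, with no size header
        // stored in the allocation itself: a 3-page allocation starting at
        // bit i had its end bit set at i + 2 when it was allocated, so the
        // scan above gives End - i + 1 = 3.
        //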

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the end-of-allocation bit and the allocation bitmap bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }

    //
    // Get the first PTE and its corresponding PFN entry
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = PointerPte - StartPte + 1;

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PFN entries as not part of an allocation anymore
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, our entire allocation must've fit within the initial non
        // paged pool, or the expansion nonpaged pool, so get the PFN entry of
        // the next allocation
        //
        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }

    }

    //
    // Check if the page right after our allocation begins another allocation
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // It doesn't, so it must be the start of a free run: locate its free
        // entry descriptor so we can merge it into ours
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Owner == FreeEntry);

        //
        // Consume this entry's pages, and remove it from its free list
        //
        FreePages += FreeEntry->Size;
        RemoveEntryList (&FreeEntry->List);
    }

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation
    // and then check if this page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        FreeEntry = FreeEntry->Owner;

        //
        // Check if the entry is small enough to be indexed on a free list
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
        {
            //
            // Remove the list from where it is now
            //
            RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = (ULONG)(FreeEntry->Size - 1);
            if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

            //
            // Do it
            //
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }

    //
    // Check if we were unable to do any compaction, and we'll stick with this
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages, at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = FreeEntry->Size - 1;
        if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

        //
        // And insert us
        //
        InsertTailList (&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Get all the pages between our allocation and its end. These will all now
    // become free page chunks.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);
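
    //
    // Every page of the final (possibly coalesced) free run now points back
    // to its descriptor through the Owner field, restoring the invariant that
    // MiInitializeNonPagedPool established and that the merge logic above
    // depends on.
    //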

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}


BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    UNIMPLEMENTED;
}

/* EOF */