1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/pool.c
5 * PURPOSE: ARM Memory Manager Pool Allocator
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::POOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
22 PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
23 PVOID MmNonPagedPoolEnd0;
24 PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
25 KGUARDED_MUTEX MmPagedPoolMutex;
26 MM_PAGED_POOL_INFO MmPagedPoolInfo;
27 SIZE_T MmAllocatedNonPagedPool;
28 ULONG MmSpecialPoolTag;
29 ULONG MmConsumedPoolPercentage;
30 BOOLEAN MmProtectFreedNonPagedPool;
31
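/*
 * N.B. Free nonpaged pool is tracked without a separate data structure: every
 * free page holds an MMFREE_POOL_ENTRY at its start, whose Owner field points
 * back to the first page of its contiguous run, while the first page's entry
 * carries the run's Size and the list linkage. Runs are kept on
 * MmNonPagedPoolFreeListHead[], indexed by size (the last list holds all
 * larger runs). When MmProtectFreedNonPagedPool is set, the PTEs of free pages
 * are invalidated so that a stale reference faults instead of silently
 * reading freed memory.
 */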
32 /* PRIVATE FUNCTIONS **********************************************************/
33
34 VOID
35 NTAPI
36 MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
37 IN ULONG PageCount)
38 {
39 PMMPTE PointerPte, LastPte;
40 MMPTE TempPte;
41
42 /* If pool is physical, can't protect PTEs */
43 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;
44
45 /* Get PTE pointers and loop */
46 PointerPte = MiAddressToPte(VirtualAddress);
47 LastPte = PointerPte + PageCount;
48 do
49 {
50 /* Capture the PTE for safety */
51 TempPte = *PointerPte;
52
53 /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
54 TempPte.u.Hard.Valid = 0;
55 TempPte.u.Soft.Prototype = 1;
56 MI_WRITE_INVALID_PTE(PointerPte, TempPte);
57 } while (++PointerPte < LastPte);
58
59 /* Flush the TLB */
60 KeFlushEntireTb(TRUE, TRUE);
61 }
62
63 BOOLEAN
64 NTAPI
65 MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
66 IN ULONG PageCount)
67 {
68 PMMPTE PointerPte;
69 MMPTE TempPte;
70 PFN_NUMBER UnprotectedPages = 0;
71
72 /* If pool is physical, there are no protected PTEs to restore */
73 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;
74
75 /* Get, and capture the PTE */
76 PointerPte = MiAddressToPte(VirtualAddress);
77 TempPte = *PointerPte;
78
79 /* Loop protected PTEs */
80 while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
81 {
82 /* Unprotect the PTE */
83 TempPte.u.Hard.Valid = 1;
84 TempPte.u.Soft.Prototype = 0;
85 MI_WRITE_VALID_PTE(PointerPte, TempPte);
86
87 /* One more page */
88 if (++UnprotectedPages == PageCount) break;
89
90 /* Capture next PTE */
91 TempPte = *(++PointerPte);
92 }
93
94 /* Return if any pages were unprotected */
95 return UnprotectedPages ? TRUE : FALSE;
96 }
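/* N.B. Callers in this file pass PageCount == 0 to mean "unprotect the whole
   run": the counter above is pre-incremented, so it can never match zero and
   the loop only stops when it runs out of protected PTEs. */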
97
98 VOID
99 FORCEINLINE
100 MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links,
101 OUT PVOID* PoolFlink,
102 OUT PVOID* PoolBlink)
103 {
104 BOOLEAN Safe;
105 PVOID PoolVa;
106
107 /* Initialize variables */
108 *PoolFlink = *PoolBlink = NULL;
109
110 /* Check if the list has entries */
111 if (IsListEmpty(Links) == FALSE)
112 {
113 /* We are going to need the forward link to do an insert */
114 PoolVa = Links->Flink;
115
116 /* So make it safe to access */
117 Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
118 if (Safe) *PoolFlink = PoolVa;
119 }
120
121 /* Are we going to need a backward link too? */
122 if (Links != Links->Blink)
123 {
124 /* Get the head's backward link for the insert */
125 PoolVa = Links->Blink;
126
127 /* Make it safe to access */
128 Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
129 if (Safe) *PoolBlink = PoolVa;
130 }
131 }
132
133 VOID
134 FORCEINLINE
135 MiProtectedPoolProtectLinks(IN PVOID PoolFlink,
136 IN PVOID PoolBlink)
137 {
138 /* Reprotect the pages, if they got unprotected earlier */
139 if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
140 if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
141 }
142
143 VOID
144 NTAPI
145 MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead,
146 IN PLIST_ENTRY Entry,
147 IN BOOLEAN Critical)
148 {
149 PVOID PoolFlink, PoolBlink;
150
151 /* Make the list accessible */
152 MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);
153
154 /* Now insert in the right position */
155 Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);
156
157 /* And reprotect the pages containing the free links */
158 MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
159 }
160
161 VOID
162 NTAPI
163 MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
164 {
165 PVOID PoolFlink, PoolBlink;
166
167 /* Make the list accessible */
168 MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);
169
170 /* Now remove */
171 RemoveEntryList(Entry);
172
173 /* And reprotect the pages containing the free links */
174 if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
175 if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
176 }
177
178 VOID
179 NTAPI
180 INIT_FUNCTION
181 MiInitializeNonPagedPoolThresholds(VOID)
182 {
183 PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;
184
185 /* Default low threshold of 8MB or one third of nonpaged pool */
186 MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
187 MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);
188
189 /* Default high threshold of 20MB or 50% */
190 MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
191 MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
192 ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
193 }
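/* For example (assuming x86 with 4KB pages): with a 64MB maximum nonpaged
   pool, Size is 16384 pages, so the low threshold stays at 8MB (2048 pages,
   below Size / 3) and the high threshold at 20MB (5120 pages, below Size / 2). */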
194
195 VOID
196 NTAPI
197 INIT_FUNCTION
198 MiInitializePoolEvents(VOID)
199 {
200 KIRQL OldIrql;
201 PFN_NUMBER FreePoolInPages;
202
203 /* Lock paged pool */
204 KeAcquireGuardedMutex(&MmPagedPoolMutex);
205
206 /* The total size of the paged pool, minus the allocated size, is free */
207 FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
208
209 /* Check the initial high state */
210 if (FreePoolInPages >= MiHighPagedPoolThreshold)
211 {
212 /* We have plenty of pool */
213 KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
214 }
215 else
216 {
217 /* We don't */
218 KeClearEvent(MiHighPagedPoolEvent);
219 }
220
221 /* Check the initial low state */
222 if (FreePoolInPages <= MiLowPagedPoolThreshold)
223 {
224 /* We're very low in free pool memory */
225 KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
226 }
227 else
228 {
229 /* We're not */
230 KeClearEvent(MiLowPagedPoolEvent);
231 }
232
233 /* Release the paged pool lock */
234 KeReleaseGuardedMutex(&MmPagedPoolMutex);
235
236 /* Now it's time for the nonpaged pool lock */
237 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
238
239 /* Free pages are the maximum minus what's been allocated */
240 FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
241
242 /* Check if we have plenty */
243 if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
244 {
245 /* We do, set the event */
246 KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
247 }
248 else
249 {
250 /* We don't, clear the event */
251 KeClearEvent(MiHighNonPagedPoolEvent);
252 }
253
254 /* Check if we have very little */
255 if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
256 {
257 /* We do, set the event */
258 KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
259 }
260 else
261 {
262 /* We don't, clear it */
263 KeClearEvent(MiLowNonPagedPoolEvent);
264 }
265
266 /* We're done, release the nonpaged pool lock */
267 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
268 }
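/* N.B. The Mi*PagedPoolEvent / Mi*NonPagedPoolEvent objects set above are
   presumably the ones surfaced to user mode as the
   \KernelObjects\{Low,High}{,Non}PagedPoolCondition notification events, so
   this routine just seeds their initial state from the current free counts. */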
269
270 VOID
271 NTAPI
272 INIT_FUNCTION
273 MiInitializeNonPagedPool(VOID)
274 {
275 ULONG i;
276 PFN_NUMBER PoolPages;
277 PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
278 PMMPTE PointerPte;
279 PAGED_CODE();
280
281 //
282 // We keep 4 lists of free pages (4 lists help avoid contention)
283 //
284 for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
285 {
286 //
287 // Initialize each of them
288 //
289 InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
290 }
291
292 //
293 // Calculate how many pages the initial nonpaged pool has
294 //
295 PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
296 MmNumberOfFreeNonPagedPool = PoolPages;
297
298 //
299 // Initialize the first free entry
300 //
301 FreeEntry = MmNonPagedPoolStart;
302 FirstEntry = FreeEntry;
303 FreeEntry->Size = PoolPages;
304 FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
305 FreeEntry->Owner = FirstEntry;
306
307 //
308 // Insert it into the last list
309 //
310 InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
311 &FreeEntry->List);
312
313 //
314 // Now create free entries for every single other page
315 //
316 while (PoolPages-- > 1)
317 {
318 //
319 // Link them all back to the original entry
320 //
321 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
322 FreeEntry->Owner = FirstEntry;
323 FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
324 }
325
326 //
327 // Validate and remember first allocated pool page
328 //
329 PointerPte = MiAddressToPte(MmNonPagedPoolStart);
330 ASSERT(PointerPte->u.Hard.Valid == 1);
331 MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
332
333 //
334 // Keep track of where initial nonpaged pool ends
335 //
336 MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
337 MmSizeOfNonPagedPoolInBytes);
338
339 //
340 // Validate and remember last allocated pool page
341 //
342 PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
343 ASSERT(PointerPte->u.Hard.Valid == 1);
344 MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
345
346 //
347 // Validate the first nonpaged pool expansion page (which is a guard page)
348 //
349 PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
350 ASSERT(PointerPte->u.Hard.Valid == 0);
351
352 //
353 // Calculate the size of the expansion region alone
354 //
355 MiExpansionPoolPagesInitialCharge =
356 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);
357
358 //
359 // Remove 2 pages, since there's a guard page on top and on the bottom
360 //
361 MiExpansionPoolPagesInitialCharge -= 2;
362
363 //
364 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
365 // guard page on top so make sure to skip it. The bottom guard page will be
366 // guaranteed by the fact our size is off by one.
367 //
368 MiInitializeSystemPtes(PointerPte + 1,
369 MiExpansionPoolPagesInitialCharge,
370 NonPagedPoolExpansion);
371 }
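/* As a worked example (assuming 4KB pages): a 2MB initial nonpaged pool gives
   PoolPages = 512, so a single 512-page run is created, its descriptor lives
   in the first page, the remaining 511 pages just point back to it via Owner,
   and the run is queued on the last (largest-size) free list. */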
372
373 PVOID
374 NTAPI
375 MiAllocatePoolPages(IN POOL_TYPE PoolType,
376 IN SIZE_T SizeInBytes)
377 {
378 PFN_NUMBER SizeInPages, PageFrameNumber, PageTableCount;
379 ULONG i;
380 KIRQL OldIrql;
381 PLIST_ENTRY NextEntry, NextHead, LastHead;
382 PMMPTE PointerPte, StartPte;
383 PMMPDE PointerPde;
384 ULONG EndAllocation;
385 MMPTE TempPte;
386 MMPDE TempPde;
387 PMMPFN Pfn1;
388 PVOID BaseVa, BaseVaStart;
389 PMMFREE_POOL_ENTRY FreeEntry;
390 PKSPIN_LOCK_QUEUE LockQueue;
391
392 //
393 // Figure out how big the allocation is in pages
394 //
395 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
396
397 //
398 // Handle paged pool
399 //
400 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
401 {
402 //
403 // Lock the paged pool mutex
404 //
405 KeAcquireGuardedMutex(&MmPagedPoolMutex);
406
407 //
408 // Find some empty allocation space
409 //
410 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
411 SizeInPages,
412 MmPagedPoolInfo.PagedPoolHint);
413 if (i == 0xFFFFFFFF)
414 {
415 //
416 // Compute how many page tables the expansion needs
417 //
418 i = ((SizeInPages - 1) / PTE_COUNT) + 1;
419 DPRINT1("Paged pool expansion: %d %x\n", i, SizeInPages);
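/* e.g. on non-PAE x86, where PTE_COUNT is presumably 1024, a request for
   3000 pages yields i = ((3000 - 1) / 1024) + 1 = 3 new page tables */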
420
421 //
422 // Check if there is enough paged pool expansion space left
423 //
424 if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
425 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
426 {
427 //
428 // Out of memory!
429 //
430 DPRINT1("OUT OF PAGED POOL!!!\n");
431 KeReleaseGuardedMutex(&MmPagedPoolMutex);
432 return NULL;
433 }
434
435 //
436 // Check if we'll have to expand past the last PTE we have available
437 //
438 if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
439 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
440 {
441 //
442 // We can only support this much then
443 //
444 PageTableCount = (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
445 MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
446 1;
447 ASSERT(PageTableCount < i);
448 i = PageTableCount;
449 }
450 else
451 {
452 //
453 // Otherwise, there is plenty of space left for this expansion
454 //
455 PageTableCount = i;
456 }
457
458 //
459 // Get the template PDE we'll use to expand
460 //
461 TempPde = ValidKernelPde;
462
463 //
464 // Get the first PDE in the expansion space
465 //
466 PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
467 BaseVa = MiPdeToAddress(PointerPde);
468 BaseVaStart = BaseVa;
469
470 //
471 // Lock the PFN database and loop pages
472 //
473 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
474 do
475 {
476 //
477 // It should not already be valid
478 //
479 ASSERT(PointerPde->u.Hard.Valid == 0);
480
481 /* Request a page */
482 MI_SET_USAGE(MI_USAGE_PAGED_POOL);
483 MI_SET_PROCESS2("Kernel");
484 PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
485 TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
486 #if (_MI_PAGING_LEVELS >= 3)
487 /* On PAE/x64 systems, there's no double-buffering */
488 ASSERT(FALSE);
489 #else
490 //
491 // Save it into our double-buffered system page directory
492 //
493 MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;
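/* N.B. The copy matters because each process has its own page directory;
   processes created before this expansion won't have the new PDE, and the
   page fault path (MiCheckPdeForPagedPool) is expected to copy it in lazily
   from this master copy when such a process first touches the new range. */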
494
495 /* Initialize the PFN */
496 MiInitializePfnForOtherProcess(PageFrameNumber,
497 (PMMPTE)PointerPde,
498 MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);
499
500 /* Write the actual PDE now */
501 MI_WRITE_VALID_PDE(PointerPde, TempPde);
502 #endif
503 //
504 // Move on to the next expansion address
505 //
506 PointerPde++;
507 BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
508 i--;
509 } while (i > 0);
510
511 //
512 // Release the PFN database lock
513 //
514 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
515
516 //
517 // These pages are now available, clear their bits in the allocation bitmap
518 //
519 EndAllocation = (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
520 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
521 PTE_COUNT;
522 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
523 EndAllocation,
524 PageTableCount * PTE_COUNT);
525
526 //
527 // Update the next expansion location
528 //
529 MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;
530
531 //
532 // Zero out the newly available memory
533 //
534 RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);
535
536 //
537 // Now try consuming the pages again
538 //
539 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
540 SizeInPages,
541 0);
542 if (i == 0xFFFFFFFF)
543 {
544 //
545 // Out of memory!
546 //
547 DPRINT1("OUT OF PAGED POOL!!!\n");
548 KeReleaseGuardedMutex(&MmPagedPoolMutex);
549 return NULL;
550 }
551 }
552
553 //
554 // Update the pool hint if the request was just one page
555 //
556 if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
557
558 //
559 // Update the end bitmap so we know the bounds of this allocation when
560 // the time comes to free it
561 //
562 EndAllocation = i + SizeInPages - 1;
563 RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);
564
565 //
566 // Now we can release the lock (it mainly protects the bitmap)
567 //
568 KeReleaseGuardedMutex(&MmPagedPoolMutex);
569
570 //
571 // Now figure out where this allocation starts
572 //
573 BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
574
575 //
576 // Flush the TLB
577 //
578 KeFlushEntireTb(TRUE, TRUE);
579
580 /* Setup a demand-zero writable PTE */
581 MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
582
583 //
584 // Find the first and last PTE, then loop them all
585 //
586 PointerPte = MiAddressToPte(BaseVa);
587 StartPte = PointerPte + SizeInPages;
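/* N.B. despite its name, StartPte is used here as the one-past-the-end
   sentinel for the loop below */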
588 do
589 {
590 //
591 // Write the demand zero PTE and keep going
592 //
593 MI_WRITE_INVALID_PTE(PointerPte, TempPte);
594 } while (++PointerPte < StartPte);
595
596 //
597 // Return the allocation address to the caller
598 //
599 return BaseVa;
600 }
601
602 //
603 // Allocations of less than 4 pages go into their individual buckets
604 //
605 i = SizeInPages - 1;
606 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
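/* i.e. 1-page requests start at list 0, 2-page at list 1, 3-page at list 2,
   and anything of MI_MAX_FREE_PAGE_LISTS pages or more starts at the last list */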
607
608 //
609 // Loop through all the free page lists based on the page index
610 //
611 NextHead = &MmNonPagedPoolFreeListHead[i];
612 LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
613
614 //
615 // Acquire the nonpaged pool lock
616 //
617 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
618 do
619 {
620 //
621 // Now loop through all the free page entries in this given list
622 //
623 NextEntry = NextHead->Flink;
624 while (NextEntry != NextHead)
625 {
626 /* Is freed nonpaged pool protection enabled? */
627 if (MmProtectFreedNonPagedPool)
628 {
629 /* We need to be able to touch this page, unprotect it */
630 MiUnProtectFreeNonPagedPool(NextEntry, 0);
631 }
632
633 //
634 // Grab the entry and see if it can handle our allocation
635 //
636 FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
637 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
638 if (FreeEntry->Size >= SizeInPages)
639 {
640 //
641 // It does, so consume the pages from here
642 //
643 FreeEntry->Size -= SizeInPages;
644
645 //
646 // The allocation will begin in this free page area
647 //
648 BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
649 (FreeEntry->Size << PAGE_SHIFT));
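/* N.B. The pages are carved from the tail of the run, so the run's descriptor
   (which lives in its first page) stays valid without being relocated */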
650
651 /* Remove the item from the list, depending on whether pool is protected */
652 MmProtectFreedNonPagedPool ?
653 MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
654 RemoveEntryList(&FreeEntry->List);
655
656 //
657 // However, check if it's still got space left
658 //
659 if (FreeEntry->Size != 0)
660 {
661 /* Check which list to insert this entry into */
662 i = FreeEntry->Size - 1;
663 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
664
665 /* Insert the entry into the free list head, check for prot. pool */
666 MmProtectFreedNonPagedPool ?
667 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
668 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
669
670 /* Is freed non paged pool protected? */
671 if (MmProtectFreedNonPagedPool)
672 {
673 /* Protect the freed pool! */
674 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
675 }
676 }
677
678 //
679 // Grab the PTE for this allocation
680 //
681 PointerPte = MiAddressToPte(BaseVa);
682 ASSERT(PointerPte->u.Hard.Valid == 1);
683
684 //
685 // Grab the PFN entry for the first allocated page
686 //
687 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
688
689 //
690 // Now mark it as the beginning of an allocation
691 //
692 ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
693 Pfn1->u3.e1.StartOfAllocation = 1;
694
695 /* Mark it as a verifier allocation if needed */
696 ASSERT(Pfn1->u4.VerifierAllocation == 0);
697 if (PoolType & 64) Pfn1->u4.VerifierAllocation = 1;
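/* (bit 0x40 in PoolType appears to be an internal flag the executive pool
   ORs in for Driver Verifier-tracked allocations; it is not a public pool type) */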
698
699 //
700 // Check if the allocation is larger than one page
701 //
702 if (SizeInPages != 1)
703 {
704 //
705 // Navigate to the last PFN entry and PTE
706 //
707 PointerPte += SizeInPages - 1;
708 ASSERT(PointerPte->u.Hard.Valid == 1);
709 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
710 }
711
712 //
713 // Mark this PFN as the last (might be the same as the first)
714 //
715 ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
716 Pfn1->u3.e1.EndOfAllocation = 1;
717
718 //
719 // Release the nonpaged pool lock, and return the allocation
720 //
721 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
722 return BaseVa;
723 }
724
725 //
726 // Try the next free page entry
727 //
728 NextEntry = FreeEntry->List.Flink;
729
730 /* Is freed non paged pool protected? */
731 if (MmProtectFreedNonPagedPool)
732 {
733 /* Protect the freed pool! */
734 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
735 }
736 }
737 } while (++NextHead < LastHead);
738
739 //
740 // If we got here, we're out of space.
741 // Start by releasing the lock
742 //
743 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
744
745 //
746 // Allocate some system PTEs
747 //
748 StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
749 PointerPte = StartPte;
750 if (StartPte == NULL)
751 {
752 //
753 // Ran out of memory
754 //
755 DPRINT1("Out of NP Expansion Pool\n");
756 return NULL;
757 }
758
759 //
760 // Acquire the pool lock now
761 //
762 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
763
764 //
765 // Lock the PFN database too
766 //
767 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
768 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
769
770 //
771 // Loop the pages
772 //
773 TempPte = ValidKernelPte;
774 do
775 {
776 /* Allocate a page */
777 MI_SET_USAGE(MI_USAGE_PAGED_POOL);
778 MI_SET_PROCESS2("Kernel");
779 PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
780
781 /* Get the PFN entry for it and fill it out */
782 Pfn1 = MiGetPfnEntry(PageFrameNumber);
783 Pfn1->u3.e2.ReferenceCount = 1;
784 Pfn1->u2.ShareCount = 1;
785 Pfn1->PteAddress = PointerPte;
786 Pfn1->u3.e1.PageLocation = ActiveAndValid;
787 Pfn1->u4.VerifierAllocation = 0;
788
789 /* Write the PTE for it */
790 TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
791 MI_WRITE_VALID_PTE(PointerPte++, TempPte);
792 } while (--SizeInPages > 0);
793
794 //
795 // This is the last page
796 //
797 Pfn1->u3.e1.EndOfAllocation = 1;
798
799 //
800 // Get the first page and mark it as such
801 //
802 Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
803 Pfn1->u3.e1.StartOfAllocation = 1;
804
805 /* Mark it as a verifier allocation if needed */
806 ASSERT(Pfn1->u4.VerifierAllocation == 0);
807 if (PoolType & 64) Pfn1->u4.VerifierAllocation = 1;
808
809 //
810 // Release the PFN and nonpaged pool locks
811 //
812 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
813 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
814
815 //
816 // Return the address
817 //
818 return MiPteToAddress(StartPte);
819 }
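/*
 * Rough usage sketch (illustrative, based on how the executive pool is
 * generally expected to drive these routines, not on code in this file):
 * ExAllocatePoolWithTag hands whole-page requests, and page-level growth of
 * its own free lists, to MiAllocatePoolPages(PoolType, NumberOfBytes), and
 * later returns the same base VA to MiFreePoolPages(BaseVa), which recovers
 * the allocation's length on its own (the end bitmap for paged pool, the PFN
 * Start/EndOfAllocation bits for nonpaged pool) and returns the page count.
 */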
820
821 ULONG
822 NTAPI
823 MiFreePoolPages(IN PVOID StartingVa)
824 {
825 PMMPTE PointerPte, StartPte;
826 PMMPFN Pfn1, StartPfn;
827 PFN_NUMBER FreePages, NumberOfPages;
828 KIRQL OldIrql;
829 PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
830 ULONG i, End;
831
832 //
833 // Handle paged pool
834 //
835 if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
836 {
837 //
838 // Calculate the offset from the beginning of paged pool, and convert it
839 // into pages
840 //
841 i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
842 End = i;
843
844 //
845 // Now use the end bitmap to scan until we find a set bit, meaning that
846 // this allocation finishes here
847 //
848 while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
849
850 //
851 // Now calculate the total number of pages this allocation spans
852 //
853 NumberOfPages = End - i + 1;
854
855 /* Delete the actual pages */
856 PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
857 FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
858 ASSERT(FreePages == NumberOfPages);
859
860 //
861 // Acquire the paged pool lock
862 //
863 KeAcquireGuardedMutex(&MmPagedPoolMutex);
864
865 //
866 // Clear the end-of-allocation bit and the allocation bitmap bits
867 //
868 RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i);
869 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
870
871 //
872 // Update the hint if we need to
873 //
874 if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
875
876 //
877 // Release the lock protecting the bitmaps
878 //
879 KeReleaseGuardedMutex(&MmPagedPoolMutex);
880
881 //
882 // And finally return the number of pages freed
883 //
884 return NumberOfPages;
885 }
886
887 //
888 // Get the first PTE and its corresponding PFN entry
889 //
890 StartPte = PointerPte = MiAddressToPte(StartingVa);
891 StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
892
893 //
894 // Loop until we find the last PTE
895 //
896 while (Pfn1->u3.e1.EndOfAllocation == 0)
897 {
898 //
899 // Keep going
900 //
901 PointerPte++;
902 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
903 }
904
905 //
906 // Now we know how many pages we have
907 //
908 NumberOfPages = PointerPte - StartPte + 1;
909
910 //
911 // Acquire the nonpaged pool lock
912 //
913 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
914
915 //
916 // Mark the first and last PFN entries as not part of an allocation anymore
917 //
918 StartPfn->u3.e1.StartOfAllocation = 0;
919 Pfn1->u3.e1.EndOfAllocation = 0;
920
921 //
922 // Assume we will free as many pages as the allocation was
923 //
924 FreePages = NumberOfPages;
925
926 //
927 // Peek one page past the end of the allocation
928 //
929 PointerPte++;
930
931 //
932 // Guard against going past initial nonpaged pool
933 //
934 if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
935 {
936 //
937 // This page is on the outskirts of initial nonpaged pool, so ignore it
938 //
939 Pfn1 = NULL;
940 }
941 else
942 {
943 /* Sanity check */
944 ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
945
946 /* Check if protected pool is enabled */
947 if (MmProtectFreedNonPagedPool)
948 {
949 /* The freed block will be merged, it must be made accessible */
950 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
951 }
952
953 //
954 // Otherwise, our entire allocation must've fit within the initial non
955 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
956 // the next allocation
957 //
958 if (PointerPte->u.Hard.Valid == 1)
959 {
960 //
961 // It's either expansion or initial: get the PFN entry
962 //
963 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
964 }
965 else
966 {
967 //
968 // This means we've reached the guard page that protects the end of
969 // the expansion nonpaged pool
970 //
971 Pfn1 = NULL;
972 }
973
974 }
975
976 //
977 // Check if the page right after us is free pool rather than another allocation
978 //
979 if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
980 {
981 //
982 // It is free pool, so locate its free entry descriptor and merge it into ours
983 //
984 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
985 (NumberOfPages << PAGE_SHIFT));
986 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
987 ASSERT(FreeEntry->Owner == FreeEntry);
988
989 /* Consume this entry's pages */
990 FreePages += FreeEntry->Size;
991
992 /* Remove the item from the list, depending on whether pool is protected */
993 MmProtectFreedNonPagedPool ?
994 MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
995 RemoveEntryList(&FreeEntry->List);
996 }
997
998 //
999 // Now get the official free entry we'll create for the caller's allocation
1000 //
1001 FreeEntry = StartingVa;
1002
1003 //
1004 // Check if our allocation starts at the very first pool page
1005 //
1006 if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
1007 {
1008 //
1009 // Then we can't do anything or we'll risk underflowing
1010 //
1011 Pfn1 = NULL;
1012 }
1013 else
1014 {
1015 //
1016 // Otherwise, get the PTE for the page right before our allocation
1017 //
1018 PointerPte -= NumberOfPages + 1;
1019
1020 /* Check if protected pool is enabled */
1021 if (MmProtectFreedNonPagedPool)
1022 {
1023 /* The freed block will be merged, it must be made accessible */
1024 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
1025 }
1026
1027 /* Check if this is valid pool, or a guard page */
1028 if (PointerPte->u.Hard.Valid == 1)
1029 {
1030 //
1031 // It's either expansion or initial nonpaged pool, get the PFN entry
1032 //
1033 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1034 }
1035 else
1036 {
1037 //
1038 // We must've reached the guard page, so don't risk touching it
1039 //
1040 Pfn1 = NULL;
1041 }
1042 }
1043
1044 //
1045 // Check if there is a valid PFN entry for the page before the allocation
1046 // and then check if this page was actually the end of an allocation.
1047 // If it wasn't, then we know for sure it's a free page
1048 //
1049 if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
1050 {
1051 //
1052 // Get the free entry descriptor for that given page range
1053 //
1054 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
1055 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
1056 FreeEntry = FreeEntry->Owner;
1057
1058 /* Check if protected pool is enabled */
1059 if (MmProtectFreedNonPagedPool)
1060 {
1061 /* The freed block will be merged, it must be made accessible */
1062 MiUnProtectFreeNonPagedPool(FreeEntry, 0);
1063 }
1064
1065 //
1066 // Check if the entry is small enough to be indexed on a free list
1067 // If it is, we'll want to re-insert it, since we're about to
1068 // collapse our pages on top of it, which will change its count
1069 //
1070 if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
1071 {
1072 /* Remove the item from the list, depending on whether pool is protected */
1073 MmProtectFreedNonPagedPool ?
1074 MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
1075 RemoveEntryList(&FreeEntry->List);
1076
1077 //
1078 // Update its size
1079 //
1080 FreeEntry->Size += FreePages;
1081
1082 //
1083 // And now find the new appropriate list to place it in
1084 //
1085 i = (ULONG)(FreeEntry->Size - 1);
1086 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
1087
1088 /* Insert the entry into the free list head, check for prot. pool */
1089 MmProtectFreedNonPagedPool ?
1090 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
1091 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
1092 }
1093 else
1094 {
1095 //
1096 // Otherwise, just combine our free pages into this entry
1097 //
1098 FreeEntry->Size += FreePages;
1099 }
1100 }
1101
1102 //
1103 // Check if we could not merge with the block before us; if so, we become our own free entry
1104 //
1105 if (FreeEntry == StartingVa)
1106 {
1107 //
1108 // Well, now we are a free entry. At worst we just have our newly freed
1109 // pages, at best we have our pages plus whatever entry came after us
1110 //
1111 FreeEntry->Size = FreePages;
1112
1113 //
1114 // Find the appropriate list we should be on
1115 //
1116 i = FreeEntry->Size - 1;
1117 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
1118
1119 /* Insert the entry into the free list head, check for prot. pool */
1120 MmProtectFreedNonPagedPool ?
1121 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
1122 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
1123 }
1124
1125 //
1126 // Just a sanity check
1127 //
1128 ASSERT(FreePages != 0);
1129
1130 //
1131 // Get all the pages between our allocation and its end. These will all now
1132 // become free page chunks.
1133 //
1134 NextEntry = StartingVa;
1135 LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
1136 do
1137 {
1138 //
1139 // Link back to the parent free entry, and keep going
1140 //
1141 NextEntry->Owner = FreeEntry;
1142 NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
1143 NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
1144 } while (NextEntry != LastEntry);
1145
1146 /* Is freed non paged pool protected? */
1147 if (MmProtectFreedNonPagedPool)
1148 {
1149 /* Protect the freed pool! */
1150 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
1151 }
1152
1153 //
1154 // We're done, release the lock and let the caller know how much we freed
1155 //
1156 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
1157 return NumberOfPages;
1158 }
1159
1160
1161 BOOLEAN
1162 NTAPI
1163 MiRaisePoolQuota(IN POOL_TYPE PoolType,
1164 IN ULONG CurrentMaxQuota,
1165 OUT PULONG NewMaxQuota)
1166 {
1167 //
1168 // Not implemented
1169 //
1170 UNIMPLEMENTED;
1171 *NewMaxQuota = CurrentMaxQuota + 65536;
1172 return TRUE;
1173 }
1174
1175 /* PUBLIC FUNCTIONS ***********************************************************/
1176
1177 /*
1178 * @unimplemented
1179 */
1180 PVOID
1181 NTAPI
1182 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
1183 IN ULONG PoolTag)
1184 {
1185 UNIMPLEMENTED;
1186 return NULL;
1187 }
1188
1189 /*
1190 * @unimplemented
1191 */
1192 VOID
1193 NTAPI
1194 MmFreeMappingAddress(IN PVOID BaseAddress,
1195 IN ULONG PoolTag)
1196 {
1197 UNIMPLEMENTED;
1198 }
1199
1200 /* EOF */