/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
KGUARDED_MUTEX MmPagedPoolMutex;
MM_PAGED_POOL_INFO MmPagedPoolInfo;
SIZE_T MmAllocatedNonPagedPool;
ULONG MmSpecialPoolTag;
ULONG MmConsumedPoolPercentage;
BOOLEAN MmProtectFreedNonPagedPool;

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                          IN ULONG PageCount)
{
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte;

    /* If pool is physical, can't protect PTEs */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;

    /* Get PTE pointers and loop */
    PointerPte = MiAddressToPte(VirtualAddress);
    LastPte = PointerPte + PageCount;
    do
    {
        /* Capture the PTE for safety */
        TempPte = *PointerPte;

        /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
        TempPte.u.Hard.Valid = 0;
        TempPte.u.Soft.Prototype = 1;
        MI_WRITE_INVALID_PTE(PointerPte, TempPte);
    } while (++PointerPte < LastPte);

    /* Flush the TLB */
    KeFlushEntireTb(TRUE, TRUE);
}

BOOLEAN
NTAPI
MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                            IN ULONG PageCount)
{
    PMMPTE PointerPte;
    MMPTE TempPte;
    PFN_NUMBER UnprotectedPages = 0;

    /* If pool is physical, can't protect PTEs */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;

    /* Get, and capture the PTE */
    PointerPte = MiAddressToPte(VirtualAddress);
    TempPte = *PointerPte;

    /* Loop protected PTEs */
    while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
    {
        /* Unprotect the PTE */
        TempPte.u.Hard.Valid = 1;
        TempPte.u.Soft.Prototype = 0;
        MI_WRITE_VALID_PTE(PointerPte, TempPte);

        /* One more page */
        if (++UnprotectedPages == PageCount) break;

        /* Capture next PTE */
        TempPte = *(++PointerPte);
    }

    /* Return if any pages were unprotected */
    return UnprotectedPages ? TRUE : FALSE;
}

VOID
FORCEINLINE
MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links,
                              OUT PVOID* PoolFlink,
                              OUT PVOID* PoolBlink)
{
    BOOLEAN Safe;
    PVOID PoolVa;

    /* Initialize variables */
    *PoolFlink = *PoolBlink = NULL;

    /* Check if the list has entries */
    if (IsListEmpty(Links) == FALSE)
    {
        /* We are going to need the forward link to do an insert */
        PoolVa = Links->Flink;

        /* So make it safe to access */
        Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
        if (Safe) *PoolFlink = PoolVa;
    }

    /* Are we going to need a backward link too? */
    if (Links != Links->Blink)
    {
        /* Get the head's backward link for the insert */
        PoolVa = Links->Blink;

        /* Make it safe to access */
        Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
        if (Safe) *PoolBlink = PoolVa;
    }
}

VOID
FORCEINLINE
MiProtectedPoolProtectLinks(IN PVOID PoolFlink,
                            IN PVOID PoolBlink)
{
    /* Reprotect the pages, if they got unprotected earlier */
    if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
    if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
}

VOID
NTAPI
MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead,
                          IN PLIST_ENTRY Entry,
                          IN BOOLEAN Critical)
{
    PVOID PoolFlink, PoolBlink;

    /* Make the list accessible */
    MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);

    /* Now insert in the right position */
    Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);

    /* And reprotect the pages containing the free links */
    MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
}

VOID
NTAPI
MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
{
    PVOID PoolFlink, PoolBlink;

    /* Make the list accessible */
    MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);

    /* Now remove */
    RemoveEntryList(Entry);

    /* And reprotect the pages containing the free links */
    if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
    if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
}

VOID
NTAPI
INIT_FUNCTION
MiInitializeNonPagedPoolThresholds(VOID)
{
    PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;

    /* Default low threshold of 8MB or one third of nonpaged pool */
    MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
    MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);

    /* Default high threshold of 20MB or 50% */
    MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
    MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
    ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
}

VOID
NTAPI
INIT_FUNCTION
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePoolInPages;

    /* Lock paged pool */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* Total size of the paged pool minus the allocated size, is free */
    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* Check the initial high state */
    if (FreePoolInPages >= MiHighPagedPoolThreshold)
    {
        /* We have plenty of pool */
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't */
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Check the initial low state */
    if (FreePoolInPages <= MiLowPagedPoolThreshold)
    {
        /* We're very low in free pool memory */
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We're not */
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Release the paged pool lock */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Now it's time for the nonpaged pool lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages are the maximum minus what's been allocated */
    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Check if we have plenty */
    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear the event */
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Check if we have very little */
    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear it */
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* We're done, release the nonpaged pool lock */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}

VOID
NTAPI
INIT_FUNCTION
MiInitializeNonPagedPool(VOID)
{
    ULONG i;
    PFN_NUMBER PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
        FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    }

    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge =
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}

PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER SizeInPages, PageFrameNumber, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = BYTES_TO_PAGES(SizeInBytes);

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Compute how many page tables are needed to cover this many pages
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT1("Paged pool expansion: %d %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PageTableCount = (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
                                 MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
                                 1;
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToAddress(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                             (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed nonpaged pool protection enabled? */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                MmProtectFreedNonPagedPool ?
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it still has space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    MmProtectFreedNonPagedPool ?
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed non paged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN NextEntry and index
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as a verifier allocation if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & 64) Pfn1->u4.VerifierAllocation = 1;

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & 64) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}

ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_NUMBER FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans
        //
        NumberOfPages = End - i + 1;

        /* Delete the actual pages */
        PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
        FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
        ASSERT(FreePages == NumberOfPages);

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the allocation and free bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }

    //
    // Get the first PTE and its corresponding PFN entry
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = PointerPte - StartPte + 1;

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PTEs as not part of an allocation anymore
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        /* Sanity check */
        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
        }

        //
        // Otherwise, our entire allocation must've fit within the initial non
        // paged pool, or the expansion nonpaged pool, so get the PFN entry of
        // the next allocation
        //
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if this allocation actually exists
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // It doesn't, so we should actually locate a free entry descriptor
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
        ASSERT(FreeEntry->Owner == FreeEntry);

        /* Consume this entry's pages */
        FreePages += FreeEntry->Size;

        /* Remove the item from the list, depending if pool is protected */
        MmProtectFreedNonPagedPool ?
            MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
            RemoveEntryList(&FreeEntry->List);
    }

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
        }

        /* Check if this is valid pool, or a guard page */
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation
    // and then check if this page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
        FreeEntry = FreeEntry->Owner;

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(FreeEntry, 0);
        }

        //
        // Check if the entry is small enough to be indexed on a free list
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
        {
            /* Remove the item from the list, depending if pool is protected */
            MmProtectFreedNonPagedPool ?
                MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
                RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = (ULONG)(FreeEntry->Size - 1);
            if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

            /* Insert the entry into the free list head, check for prot. pool */
            MmProtectFreedNonPagedPool ?
                MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
                InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }

    //
    // Check if we were unable to do any compaction, and we'll stick with this
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages, at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = FreeEntry->Size - 1;
        if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

        /* Insert the entry into the free list head, check for prot. pool */
        MmProtectFreedNonPagedPool ?
            MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Get all the pages between our allocation and its end. These will all now
    // become free page chunks.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);

    /* Is freed non paged pool protected? */
    if (MmProtectFreedNonPagedPool)
    {
        /* Protect the freed pool! */
        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
    }

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}

BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    UNIMPLEMENTED;
}

/* EOF */