[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this...
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/pool.c
5 * PURPOSE: ARM Memory Manager Pool Allocator
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::POOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
22 PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
23 PVOID MmNonPagedPoolEnd0;
24 PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
25 KGUARDED_MUTEX MmPagedPoolMutex;
26 MM_PAGED_POOL_INFO MmPagedPoolInfo;
27 SIZE_T MmAllocatedNonPagedPool;
28 ULONG MmSpecialPoolTag;
29 ULONG MmConsumedPoolPercentage;
30 BOOLEAN MmProtectFreedNonPagedPool;
31
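/*
 * Free nonpaged pool is tracked by MMFREE_POOL_ENTRY headers stamped into the
 * free pages themselves and linked onto MmNonPagedPoolFreeListHead[], indexed
 * by (run size in pages - 1) and capped at the last list. Roughly (the
 * authoritative definition lives in miarm.h):
 *
 *     typedef struct _MMFREE_POOL_ENTRY
 *     {
 *         LIST_ENTRY List;       // links the run onto one of the list heads
 *         PFN_NUMBER Size;       // run length in pages (valid in the owner)
 *         ULONG Signature;       // MM_FREE_POOL_SIGNATURE
 *         struct _MMFREE_POOL_ENTRY *Owner;  // first page of the run
 *     } MMFREE_POOL_ENTRY, *PMMFREE_POOL_ENTRY;
 *
 * When MmProtectFreedNonPagedPool is set, the PTEs of freed runs are made
 * invalid so that use-after-free of pool pages faults immediately.
 */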
32 /* PRIVATE FUNCTIONS **********************************************************/
33
34 VOID
35 NTAPI
36 MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
37 IN ULONG PageCount)
38 {
39 PMMPTE PointerPte, LastPte;
40 MMPTE TempPte;
41
42 /* If pool is physical, can't protect PTEs */
43 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;
44
45 /* Get PTE pointers and loop */
46 PointerPte = MiAddressToPte(VirtualAddress);
47 LastPte = PointerPte + PageCount;
48 do
49 {
50 /* Capture the PTE for safety */
51 TempPte = *PointerPte;
52
53 /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
54 TempPte.u.Hard.Valid = 0;
55 TempPte.u.Soft.Prototype = 1;
56 MI_WRITE_INVALID_PTE(PointerPte, TempPte);
57 } while (++PointerPte < LastPte);
58
59 /* Flush the TLB */
60 KeFlushEntireTb(TRUE, TRUE);
61 }
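/*
 * NOTE: an invalid PTE with the Prototype bit set is how a protected
 * free-pool page is recognized later by MiUnProtectFreeNonPagedPool.
 */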
62
63 BOOLEAN
64 NTAPI
65 MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
66 IN ULONG PageCount)
67 {
68 PMMPTE PointerPte;
69 MMPTE TempPte;
70 PFN_NUMBER UnprotectedPages = 0;
71
73 /* If pool is physical, can't unprotect PTEs */
73 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;
74
75 /* Get, and capture the PTE */
76 PointerPte = MiAddressToPte(VirtualAddress);
77 TempPte = *PointerPte;
78
79 /* Loop protected PTEs */
80 while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
81 {
82 /* Unprotect the PTE */
83 TempPte.u.Hard.Valid = 1;
84 TempPte.u.Soft.Prototype = 0;
85 MI_WRITE_VALID_PTE(PointerPte, TempPte);
86
87 /* One more page */
88 if (++UnprotectedPages == PageCount) break;
89
90 /* Capture next PTE */
91 TempPte = *(++PointerPte);
92 }
93
94 /* Return if any pages were unprotected */
95 return UnprotectedPages ? TRUE : FALSE;
96 }
97
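/*
 * The helpers below do list surgery while freed pool is protected: the pages
 * holding the neighbouring Flink/Blink entries are temporarily made valid,
 * the LIST_ENTRY is inserted or removed, and the pages are protected again.
 */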
98 VOID
99 FORCEINLINE
100 MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links,
101 OUT PVOID* PoolFlink,
102 OUT PVOID* PoolBlink)
103 {
104 BOOLEAN Safe;
105 PVOID PoolVa;
106
107 /* Initialize variables */
108 *PoolFlink = *PoolBlink = NULL;
109
110 /* Check if the list has entries */
111 if (IsListEmpty(Links) == FALSE)
112 {
113 /* We are going to need the forward link to do an insert */
114 PoolVa = Links->Flink;
115
116 /* So make it safe to access */
117 Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
118 if (Safe) *PoolFlink = PoolVa;
119 }
120
121 /* Are we going to need a backward link too? */
122 if (Links != Links->Blink)
123 {
124 /* Get the head's backward link for the insert */
125 PoolVa = Links->Blink;
126
127 /* Make it safe to access */
128 Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
129 if (Safe) *PoolBlink = PoolVa;
130 }
131 }
132
133 VOID
134 FORCEINLINE
135 MiProtectedPoolProtectLinks(IN PVOID PoolFlink,
136 IN PVOID PoolBlink)
137 {
138 /* Reprotect the pages, if they got unprotected earlier */
139 if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
140 if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
141 }
142
143 VOID
144 NTAPI
145 MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead,
146 IN PLIST_ENTRY Entry,
147 IN BOOLEAN Critical)
148 {
149 PVOID PoolFlink, PoolBlink;
150
151 /* Make the list accessible */
152 MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);
153
154 /* Now insert in the right position */
155 Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);
156
157 /* And reprotect the pages containing the free links */
158 MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
159 }
160
161 VOID
162 NTAPI
163 MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
164 {
165 PVOID PoolFlink, PoolBlink;
166
167 /* Make the list accessible */
168 MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);
169
170 /* Now remove */
171 RemoveEntryList(Entry);
172
173 /* And reprotect the pages containing the free links */
174 if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
175 if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
176 }
177
178 VOID
179 NTAPI
180 MiInitializeNonPagedPoolThresholds(VOID)
181 {
182 PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;
183
184 /* Default low threshold of 8MB or one third of nonpaged pool */
185 MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
186 MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);
187
188 /* Default high threshold of 20MB or 50% */
189 MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
190 MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
191 ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
192 }
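/*
 * Example: with a 90 MB maximum nonpaged pool the defaults stand (low = 8 MB,
 * high = 20 MB); with a 12 MB pool the caps win (low = 4 MB, high = 6 MB).
 */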
193
194 VOID
195 NTAPI
196 MiInitializePoolEvents(VOID)
197 {
198 KIRQL OldIrql;
199 PFN_NUMBER FreePoolInPages;
200
201 /* Lock paged pool */
202 KeAcquireGuardedMutex(&MmPagedPoolMutex);
203
204 /* Total size of the paged pool minus the allocated size is what's free */
205 FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
206
207 /* Check the initial high state */
208 if (FreePoolInPages >= MiHighPagedPoolThreshold)
209 {
210 /* We have plenty of pool */
211 KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
212 }
213 else
214 {
215 /* We don't */
216 KeClearEvent(MiHighPagedPoolEvent);
217 }
218
219 /* Check the initial low state */
220 if (FreePoolInPages <= MiLowPagedPoolThreshold)
221 {
222 /* We're very low in free pool memory */
223 KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
224 }
225 else
226 {
227 /* We're not */
228 KeClearEvent(MiLowPagedPoolEvent);
229 }
230
231 /* Release the paged pool lock */
232 KeReleaseGuardedMutex(&MmPagedPoolMutex);
233
234 /* Now it's time for the nonpaged pool lock */
235 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
236
237 /* Free pages are the maximum minus what's been allocated */
238 FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
239
240 /* Check if we have plenty */
241 if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
242 {
243 /* We do, set the event */
244 KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
245 }
246 else
247 {
248 /* We don't, clear the event */
249 KeClearEvent(MiHighNonPagedPoolEvent);
250 }
251
252 /* Check if we have very little */
253 if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
254 {
255 /* We do, set the event */
256 KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
257 }
258 else
259 {
260 /* We don't, clear it */
261 KeClearEvent(MiLowNonPagedPoolEvent);
262 }
263
264 /* We're done, release the nonpaged pool lock */
265 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
266 }
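/*
 * These are the pool-condition notification events exposed under
 * \KernelObjects (Low/HighPagedPoolCondition, Low/HighNonPagedPoolCondition);
 * this routine only seeds their initial state from the current free counts.
 */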
267
268 VOID
269 NTAPI
270 MiInitializeNonPagedPool(VOID)
271 {
272 ULONG i;
273 PFN_NUMBER PoolPages;
274 PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
275 PMMPTE PointerPte;
276 PAGED_CODE();
277
278 //
279 // We keep 4 lists of free pages (4 lists help avoid contention)
280 //
281 for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
282 {
283 //
284 // Initialize each of them
285 //
286 InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
287 }
288
289 //
290 // Calculate how many pages the initial nonpaged pool has
291 //
292 PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
293 MmNumberOfFreeNonPagedPool = PoolPages;
294
295 //
296 // Initialize the first free entry
297 //
298 FreeEntry = MmNonPagedPoolStart;
299 FirstEntry = FreeEntry;
300 FreeEntry->Size = PoolPages;
301 FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
302 FreeEntry->Owner = FirstEntry;
303
304 //
305 // Insert it into the last list
306 //
307 InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
308 &FreeEntry->List);
309
310 //
311 // Now create free entries for every single other page
312 //
313 while (PoolPages-- > 1)
314 {
315 //
316 // Link them all back to the original entry
317 //
318 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
319 FreeEntry->Owner = FirstEntry;
320 FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
321 }
322
323 //
324 // Validate and remember first allocated pool page
325 //
326 PointerPte = MiAddressToPte(MmNonPagedPoolStart);
327 ASSERT(PointerPte->u.Hard.Valid == 1);
328 MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
329
330 //
331 // Keep track of where initial nonpaged pool ends
332 //
333 MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
334 MmSizeOfNonPagedPoolInBytes);
335
336 //
337 // Validate and remember last allocated pool page
338 //
339 PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
340 ASSERT(PointerPte->u.Hard.Valid == 1);
341 MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
342
343 //
344 // Validate the first nonpaged pool expansion page (which is a guard page)
345 //
346 PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
347 ASSERT(PointerPte->u.Hard.Valid == 0);
348
349 //
350 // Calculate the size of the expansion region alone
351 //
352 MiExpansionPoolPagesInitialCharge =
353 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);
354
355 //
356 // Remove 2 pages, since there's a guard page on top and on the bottom
357 //
358 MiExpansionPoolPagesInitialCharge -= 2;
359
360 //
361 // Now initialize the nonpaged pool expansion PTE space. Remember there's a
362 // guard page on top so make sure to skip it. The bottom guard page will be
363 // guaranteed by the fact our size is off by one.
364 //
365 MiInitializeSystemPtes(PointerPte + 1,
366 MiExpansionPoolPagesInitialCharge,
367 NonPagedPoolExpansion);
368 }
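/*
 * At this point the initial nonpaged pool is one free run: every page in it
 * carries an MMFREE_POOL_ENTRY whose Owner points at the first page, and that
 * first entry sits on the largest free list. The expansion region above it is
 * handled through system PTEs, with a guard page at either end.
 */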
369
370 PVOID
371 NTAPI
372 MiAllocatePoolPages(IN POOL_TYPE PoolType,
373 IN SIZE_T SizeInBytes)
374 {
375 PFN_NUMBER SizeInPages, PageFrameNumber;
376 ULONG i;
377 KIRQL OldIrql;
378 PLIST_ENTRY NextEntry, NextHead, LastHead;
379 PMMPTE PointerPte, StartPte;
380 PMMPDE PointerPde;
381 ULONG EndAllocation;
382 MMPTE TempPte;
383 MMPDE TempPde;
384 PMMPFN Pfn1;
385 PVOID BaseVa, BaseVaStart;
386 PMMFREE_POOL_ENTRY FreeEntry;
387 PKSPIN_LOCK_QUEUE LockQueue;
388
389 //
390 // Figure out how big the allocation is in pages
391 //
392 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
393
394 //
395 // Handle paged pool
396 //
397 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
398 {
399 //
400 // Lock the paged pool mutex
401 //
402 KeAcquireGuardedMutex(&MmPagedPoolMutex);
403
404 //
405 // Find some empty allocation space
406 //
407 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
408 SizeInPages,
409 MmPagedPoolInfo.PagedPoolHint);
410 if (i == 0xFFFFFFFF)
411 {
412 //
413 // Compute how many page tables (PDEs) are needed for this expansion
414 //
415 i = ((SizeInPages - 1) / PTE_COUNT) + 1;
416 DPRINT1("Paged pool expansion: %d %x\n", i, SizeInPages);
417
418 //
419 // Check if there is enough paged pool expansion space left
420 //
421 if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
422 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
423 {
424 //
425 // Out of memory!
426 //
427 DPRINT1("OUT OF PAGED POOL!!!\n");
428 KeReleaseGuardedMutex(&MmPagedPoolMutex);
429 return NULL;
430 }
431
432 //
433 // Check if we'll have to expand past the last PTE we have available
434 //
435 if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
436 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
437 {
438 //
439 // We can only support this much then
440 //
441 SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
442 MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
443 1;
444 ASSERT(SizeInPages < i);
445 i = SizeInPages;
446 }
447 else
448 {
449 //
450 // Otherwise, there is plenty of space left for this expansion
451 //
452 SizeInPages = i;
453 }
454
455 //
456 // Get the template PDE we'll use to expand
457 //
458 TempPde = ValidKernelPde;
459
460 //
461 // Get the first PTE in expansion space
462 //
463 PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
464 BaseVa = MiPteToAddress(PointerPde);
465 BaseVaStart = BaseVa;
466
467 //
468 // Lock the PFN database and loop pages
469 //
470 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
471 do
472 {
473 //
474 // It should not already be valid
475 //
476 ASSERT(PointerPde->u.Hard.Valid == 0);
477
478 /* Request a page */
479 DPRINT1("Requesting %d PDEs\n", i);
480 PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
481 TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
482 DPRINT1("We have a PDE: %lx\n", PageFrameNumber);
483
484 #if (_MI_PAGING_LEVELS >= 3)
485 /* On PAE/x64 systems, there's no double-buffering */
486 ASSERT(FALSE);
487 #else
488 //
489 // Save it into our double-buffered system page directory
490 //
491 MmSystemPagePtes[(ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)] = TempPde;
492
493 /* Initialize the PFN */
494 MiInitializePfnForOtherProcess(PageFrameNumber,
495 PointerPde,
496 MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);
497
498 /* Write the actual PDE now */
499 MI_WRITE_VALID_PTE(PointerPde, TempPde);
500 #endif
501 //
502 // Move on to the next expansion address
503 //
504 PointerPde++;
505 BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
506 i--;
507 } while (i > 0);
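//
// NOTE: on x86 without PAE each process has its own page directory, so the
// MmSystemPagePtes / MmSystemPageDirectory copies written above are the
// master versions of these new paged pool PDEs, from which per-process page
// directories are brought up to date later.
//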
508
509 //
510 // Release the PFN database lock
511 //
512 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
513
514 //
515 // These pages are now available, clear their availability bits
516 //
517 EndAllocation = (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
518 MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
519 PTE_COUNT;
520 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
521 EndAllocation,
522 SizeInPages * PTE_COUNT);
523
524 //
525 // Update the next expansion location
526 //
527 MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;
528
529 //
530 // Zero out the newly available memory
531 //
532 RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);
533
534 //
535 // Now try consuming the pages again
536 //
537 SizeInPages = BYTES_TO_PAGES(SizeInBytes);
538 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
539 SizeInPages,
540 0);
541 if (i == 0xFFFFFFFF)
542 {
543 //
544 // Out of memory!
545 //
546 DPRINT1("OUT OF PAGED POOL!!!\n");
547 KeReleaseGuardedMutex(&MmPagedPoolMutex);
548 return NULL;
549 }
550 }
551
552 //
553 // Update the pool hint if the request was just one page
554 //
555 if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
556
557 //
558 // Update the end bitmap so we know the bounds of this allocation when
559 // the time comes to free it
560 //
561 EndAllocation = i + SizeInPages - 1;
562 RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);
563
564 //
565 // Now we can release the lock (it mainly protects the bitmap)
566 //
567 KeReleaseGuardedMutex(&MmPagedPoolMutex);
568
569 //
570 // Now figure out where this allocation starts
571 //
572 BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
573
574 //
575 // Flush the TLB
576 //
577 KeFlushEntireTb(TRUE, TRUE);
578
579 /* Setup a demand-zero writable PTE */
580 MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
581
582 //
583 // Find the first and last PTE, then loop them all
584 //
585 PointerPte = MiAddressToPte(BaseVa);
586 StartPte = PointerPte + SizeInPages;
587 do
588 {
589 //
590 // Write the demand zero PTE and keep going
591 //
592 MI_WRITE_INVALID_PTE(PointerPte, TempPte);
593 } while (++PointerPte < StartPte);
594
595 //
596 // Return the allocation address to the caller
597 //
598 return BaseVa;
599 }
600
601 //
602 // Allocations of less than 4 pages go into their individual buckets
603 //
604 i = SizeInPages - 1;
605 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
606
607 //
608 // Loop through all the free page lists based on the page index
609 //
610 NextHead = &MmNonPagedPoolFreeListHead[i];
611 LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
612
613 //
614 // Acquire the nonpaged pool lock
615 //
616 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
617 do
618 {
619 //
620 // Now loop through all the free page entries in this given list
621 //
622 NextEntry = NextHead->Flink;
623 while (NextEntry != NextHead)
624 {
625 /* Is freed non paged pool protection enabled? */
626 if (MmProtectFreedNonPagedPool)
627 {
628 /* We need to be able to touch this page, unprotect it */
629 MiUnProtectFreeNonPagedPool(NextEntry, 0);
630 }
631
632 //
633 // Grab the entry and see if it can handle our allocation
634 //
635 FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
636 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
637 if (FreeEntry->Size >= SizeInPages)
638 {
639 //
640 // It does, so consume the pages from here
641 //
642 FreeEntry->Size -= SizeInPages;
643
644 //
645 // The allocation will begin in this free page area
646 //
647 BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
648 (FreeEntry->Size << PAGE_SHIFT));
649
650 /* Remove the item from the list, depending if pool is protected */
651 MmProtectFreedNonPagedPool ?
652 MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
653 RemoveEntryList(&FreeEntry->List);
654
655 //
656 // However, check if it's still got space left
657 //
658 if (FreeEntry->Size != 0)
659 {
660 /* Check which list to insert this entry into */
661 i = FreeEntry->Size - 1;
662 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
663
664 /* Insert the entry into the free list head, check for prot. pool */
665 MmProtectFreedNonPagedPool ?
666 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
667 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
668
669 /* Is freed non paged pool protected? */
670 if (MmProtectFreedNonPagedPool)
671 {
672 /* Protect the freed pool! */
673 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
674 }
675 }
676
677 //
678 // Grab the PTE for this allocation
679 //
680 PointerPte = MiAddressToPte(BaseVa);
681 ASSERT(PointerPte->u.Hard.Valid == 1);
682
683 //
684 // Grab the PFN entry for the first allocated page
685 //
686 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
687
688 //
689 // Now mark it as the beginning of an allocation
690 //
691 ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
692 Pfn1->u3.e1.StartOfAllocation = 1;
693
694 /* Mark it as special pool if needed */
695 ASSERT(Pfn1->u4.VerifierAllocation == 0);
696 if (PoolType & 64) Pfn1->u4.VerifierAllocation = 1;
697
698 //
699 // Check if the allocation is larger than one page
700 //
701 if (SizeInPages != 1)
702 {
703 //
704 // Navigate to the last PFN entry and PTE
705 //
706 PointerPte += SizeInPages - 1;
707 ASSERT(PointerPte->u.Hard.Valid == 1);
708 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
709 }
710
711 //
712 // Mark this PFN as the last (might be the same as the first)
713 //
714 ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
715 Pfn1->u3.e1.EndOfAllocation = 1;
716
717 //
718 // Release the nonpaged pool lock, and return the allocation
719 //
720 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
721 return BaseVa;
722 }
723
724 //
725 // Try the next free page entry
726 //
727 NextEntry = FreeEntry->List.Flink;
728
729 /* Is freed non paged pool protected? */
730 if (MmProtectFreedNonPagedPool)
731 {
732 /* Protect the freed pool! */
733 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
734 }
735 }
736 } while (++NextHead < LastHead);
737
738 //
739 // If we got here, we're out of space.
740 // Start by releasing the lock
741 //
742 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
743
744 //
745 // Allocate some system PTEs
746 //
747 StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
748 PointerPte = StartPte;
749 if (StartPte == NULL)
750 {
751 //
752 // Ran out of memory
753 //
754 DPRINT1("Out of NP Expansion Pool\n");
755 return NULL;
756 }
757
758 //
759 // Acquire the pool lock now
760 //
761 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
762
763 //
764 // Lock the PFN database too
765 //
766 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
767 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
768
769 //
770 // Loop the pages
771 //
772 TempPte = ValidKernelPte;
773 do
774 {
775 /* Allocate a page */
776 PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
777
778 /* Get the PFN entry for it and fill it out */
779 Pfn1 = MiGetPfnEntry(PageFrameNumber);
780 Pfn1->u3.e2.ReferenceCount = 1;
781 Pfn1->u2.ShareCount = 1;
782 Pfn1->PteAddress = PointerPte;
783 Pfn1->u3.e1.PageLocation = ActiveAndValid;
784 Pfn1->u4.VerifierAllocation = 0;
785
786 /* Write the PTE for it */
787 TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
788 MI_WRITE_VALID_PTE(PointerPte++, TempPte);
789 } while (--SizeInPages > 0);
790
791 //
792 // This is the last page
793 //
794 Pfn1->u3.e1.EndOfAllocation = 1;
795
796 //
797 // Get the first page and mark it as such
798 //
799 Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
800 Pfn1->u3.e1.StartOfAllocation = 1;
801
802 /* Mark it as a verifier allocation if needed */
803 ASSERT(Pfn1->u4.VerifierAllocation == 0);
804 if (PoolType & 64) Pfn1->u4.VerifierAllocation = 1;
805
806 //
807 // Release the PFN and nonpaged pool lock
808 //
809 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
810 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
811
812 //
813 // Return the address
814 //
815 return MiPteToAddress(StartPte);
816 }
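/*
 * In short: paged pool requests are satisfied from the PagedPoolAllocationMap
 * bitmap (growing paged pool by whole page tables when no run is free), while
 * nonpaged pool requests are carved from the size-indexed free lists and fall
 * back to expansion system PTEs backed by pages taken from the PFN database.
 */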
817
818 ULONG
819 NTAPI
820 MiFreePoolPages(IN PVOID StartingVa)
821 {
822 PMMPTE PointerPte, StartPte;
823 PMMPFN Pfn1, StartPfn;
824 PFN_NUMBER FreePages, NumberOfPages;
825 KIRQL OldIrql;
826 PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
827 ULONG i, End;
828
829 //
830 // Handle paged pool
831 //
832 if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
833 {
834 //
835 // Calculate the offset from the beginning of paged pool, and convert it
836 // into pages
837 //
838 i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
839 End = i;
840
841 //
842 // Now use the end bitmap to scan until we find a set bit, meaning that
843 // this allocation finishes here
844 //
845 while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
846
847 //
848 // Now calculate the total number of pages this allocation spans
849 //
850 NumberOfPages = End - i + 1;
851
852 /* Delete the actual pages */
853 PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
854 FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
855 ASSERT(FreePages == NumberOfPages);
856
857 //
858 // Acquire the paged pool lock
859 //
860 KeAcquireGuardedMutex(&MmPagedPoolMutex);
861
862 //
863 // Clear the allocation and free bits
864 //
865 RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i);
866 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
867
868 //
869 // Update the hint if we need to
870 //
871 if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
872
873 //
874 // Release the lock protecting the bitmaps
875 //
876 KeReleaseGuardedMutex(&MmPagedPoolMutex);
877
878 //
879 // And finally return the number of pages freed
880 //
881 return NumberOfPages;
882 }
883
884 //
885 // Get the first PTE and its corresponding PFN entry
886 //
887 StartPte = PointerPte = MiAddressToPte(StartingVa);
888 StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
889
890 //
891 // Loop until we find the last PTE
892 //
893 while (Pfn1->u3.e1.EndOfAllocation == 0)
894 {
895 //
896 // Keep going
897 //
898 PointerPte++;
899 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
900 }
901
902 //
903 // Now we know how many pages we have
904 //
905 NumberOfPages = PointerPte - StartPte + 1;
906
907 //
908 // Acquire the nonpaged pool lock
909 //
910 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
911
912 //
913 // Mark the first and last PTEs as not part of an allocation anymore
914 //
915 StartPfn->u3.e1.StartOfAllocation = 0;
916 Pfn1->u3.e1.EndOfAllocation = 0;
917
918 //
919 // Assume we will free as many pages as the allocation was
920 //
921 FreePages = NumberOfPages;
922
923 //
924 // Peek one page past the end of the allocation
925 //
926 PointerPte++;
927
928 //
929 // Guard against going past initial nonpaged pool
930 //
931 if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
932 {
933 //
934 // This page is on the outskirts of initial nonpaged pool, so ignore it
935 //
936 Pfn1 = NULL;
937 }
938 else
939 {
940 /* Sanity check */
941 ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
942
943 /* Check if protected pool is enabled */
944 if (MmProtectFreedNonPagedPool)
945 {
946 /* The freed block will be merged; it must be made accessible */
947 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
948 }
949
950 //
951 // Otherwise, our entire allocation must've fit within the initial non
952 // paged pool, or the expansion nonpaged pool, so get the PFN entry of
953 // the next allocation
954 //
955 if (PointerPte->u.Hard.Valid == 1)
956 {
957 //
958 // It's either expansion or initial: get the PFN entry
959 //
960 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
961 }
962 else
963 {
964 //
965 // This means we've reached the guard page that protects the end of
966 // the expansion nonpaged pool
967 //
968 Pfn1 = NULL;
969 }
970
971 }
972
973 //
974 // Check if this allocation actually exists
975 //
976 if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
977 {
978 //
979 // It doesn't, so we should actually locate a free entry descriptor
980 //
981 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
982 (NumberOfPages << PAGE_SHIFT));
983 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
984 ASSERT(FreeEntry->Owner == FreeEntry);
985
986 /* Consume this entry's pages */
987 FreePages += FreeEntry->Size;
988
989 /* Remove the item from the list, depending if pool is protected */
990 MmProtectFreedNonPagedPool ?
991 MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
992 RemoveEntryList(&FreeEntry->List);
993 }
994
995 //
996 // Now get the official free entry we'll create for the caller's allocation
997 //
998 FreeEntry = StartingVa;
999
1000 //
1001 // Check if our allocation is the very first page
1002 //
1003 if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
1004 {
1005 //
1006 // Then we can't do anything or we'll risk underflowing
1007 //
1008 Pfn1 = NULL;
1009 }
1010 else
1011 {
1012 //
1013 // Otherwise, get the PTE for the page right before our allocation
1014 //
1015 PointerPte -= NumberOfPages + 1;
1016
1017 /* Check if protected pool is enabled */
1018 if (MmProtectFreedNonPagedPool)
1019 {
1020 /* The freed block will be merged; it must be made accessible */
1021 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
1022 }
1023
1024 /* Check if this is valid pool, or a guard page */
1025 if (PointerPte->u.Hard.Valid == 1)
1026 {
1027 //
1028 // It's either expansion or initial nonpaged pool, get the PFN entry
1029 //
1030 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1031 }
1032 else
1033 {
1034 //
1035 // We must've reached the guard page, so don't risk touching it
1036 //
1037 Pfn1 = NULL;
1038 }
1039 }
1040
1041 //
1042 // Check if there is a valid PFN entry for the page before the allocation
1043 // and then check if this page was actually the end of an allocation.
1044 // If it wasn't, then we know for sure it's a free page
1045 //
1046 if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
1047 {
1048 //
1049 // Get the free entry descriptor for that given page range
1050 //
1051 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
1052 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
1053 FreeEntry = FreeEntry->Owner;
1054
1055 /* Check if protected pool is enabled */
1056 if (MmProtectFreedNonPagedPool)
1057 {
1058 /* The freed block will be merged; it must be made accessible */
1059 MiUnProtectFreeNonPagedPool(FreeEntry, 0);
1060 }
1061
1062 //
1063 // Check if the entry is small enough to be indexed on a free list
1064 // If it is, we'll want to re-insert it, since we're about to
1065 // collapse our pages on top of it, which will change its count
1066 //
1067 if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
1068 {
1069 /* Remove the item from the list, depending if pool is protected */
1070 MmProtectFreedNonPagedPool ?
1071 MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
1072 RemoveEntryList(&FreeEntry->List);
1073
1074 //
1075 // Update its size
1076 //
1077 FreeEntry->Size += FreePages;
1078
1079 //
1080 // And now find the new appropriate list to place it in
1081 //
1082 i = (ULONG)(FreeEntry->Size - 1);
1083 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
1084
1085 /* Insert the entry into the free list head, check for prot. pool */
1086 MmProtectFreedNonPagedPool ?
1087 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
1088 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
1089 }
1090 else
1091 {
1092 //
1093 // Otherwise, just combine our free pages into this entry
1094 //
1095 FreeEntry->Size += FreePages;
1096 }
1097 }
1098
1099 //
1100 // Check if we were unable to merge with a preceding free entry, so we'll stick with this one
1101 //
1102 if (FreeEntry == StartingVa)
1103 {
1104 //
1105 // Well, now we are a free entry. At worst we just have our newly freed
1106 // pages, at best we have our pages plus whatever entry came after us
1107 //
1108 FreeEntry->Size = FreePages;
1109
1110 //
1111 // Find the appropriate list we should be on
1112 //
1113 i = FreeEntry->Size - 1;
1114 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
1115
1116 /* Insert the entry into the free list head, check for prot. pool */
1117 MmProtectFreedNonPagedPool ?
1118 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
1119 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
1120 }
1121
1122 //
1123 // Just a sanity check
1124 //
1125 ASSERT(FreePages != 0);
1126
1127 //
1128 // Get all the pages between our allocation and its end. These will all now
1129 // become free page chunks.
1130 //
1131 NextEntry = StartingVa;
1132 LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
1133 do
1134 {
1135 //
1136 // Link back to the parent free entry, and keep going
1137 //
1138 NextEntry->Owner = FreeEntry;
1139 NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
1140 NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
1141 } while (NextEntry != LastEntry);
1142
1143 /* Is freed non paged pool protected? */
1144 if (MmProtectFreedNonPagedPool)
1145 {
1146 /* Protect the freed pool! */
1147 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
1148 }
1149
1150 //
1151 // We're done, release the lock and let the caller know how much we freed
1152 //
1153 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
1154 return NumberOfPages;
1155 }
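/*
 * In short: the freed run is merged with a free neighbour on either side when
 * one exists, the pages of the freed run (plus any following run it absorbed)
 * are re-stamped with their owning entry, and that entry ends up on the free
 * list matching its new size.
 */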
1156
1157
1158 BOOLEAN
1159 NTAPI
1160 MiRaisePoolQuota(IN POOL_TYPE PoolType,
1161 IN ULONG CurrentMaxQuota,
1162 OUT PULONG NewMaxQuota)
1163 {
1164 //
1165 // Not implemented
1166 //
1167 UNIMPLEMENTED;
1168 *NewMaxQuota = CurrentMaxQuota + 65536;
1169 return TRUE;
1170 }
1171
1172 /* PUBLIC FUNCTIONS ***********************************************************/
1173
1174 /*
1175 * @unimplemented
1176 */
1177 PVOID
1178 NTAPI
1179 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
1180 IN ULONG PoolTag)
1181 {
1182 UNIMPLEMENTED;
1183 return NULL;
1184 }
1185
1186 /*
1187 * @unimplemented
1188 */
1189 VOID
1190 NTAPI
1191 MmFreeMappingAddress(IN PVOID BaseAddress,
1192 IN ULONG PoolTag)
1193 {
1194 UNIMPLEMENTED;
1195 }
1196
1197 /* EOF */