/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

/* GLOBALS ********************************************************************/

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_COUNT MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
KGUARDED_MUTEX MmPagedPoolMutex;
MM_PAGED_POOL_INFO MmPagedPoolInfo;
SIZE_T MmAllocatedNonPagedPool;
ULONG MmSpecialPoolTag;
ULONG MmConsumedPoolPercentage;
BOOLEAN MmProtectFreedNonPagedPool;
SLIST_HEADER MiNonPagedPoolSListHead;
ULONG MiNonPagedPoolSListMaximum = 4;
SLIST_HEADER MiPagedPoolSListHead;
ULONG MiPagedPoolSListMaximum = 8;

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                          IN ULONG PageCount)
{
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte;

    /* If pool is physical, can't protect PTEs */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;

    /* Get PTE pointers and loop */
    PointerPte = MiAddressToPte(VirtualAddress);
    LastPte = PointerPte + PageCount;
    do
    {
        /* Capture the PTE for safety */
        TempPte = *PointerPte;

        /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
        TempPte.u.Hard.Valid = 0;
        TempPte.u.Soft.Prototype = 1;
        MI_WRITE_INVALID_PTE(PointerPte, TempPte);
    } while (++PointerPte < LastPte);

    /* Flush the TLB */
    KeFlushEntireTb(TRUE, TRUE);
}
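
/*
 * NOTE: A "protected" free-pool PTE is simply marked invalid with the
 * Prototype bit set, so any stray touch of freed nonpaged pool takes a
 * page fault instead of silently corrupting the free lists, and the
 * unprotect path below can recognize this exact pattern (Valid == 0,
 * Prototype == 1) as its own.
 */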

BOOLEAN
NTAPI
MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                            IN ULONG PageCount)
{
    PMMPTE PointerPte;
    MMPTE TempPte;
    PFN_NUMBER UnprotectedPages = 0;

    /* If pool is physical, there are no PTEs to unprotect */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;

    /* Get, and capture the PTE */
    PointerPte = MiAddressToPte(VirtualAddress);
    TempPte = *PointerPte;

    /* Loop protected PTEs */
    while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
    {
        /* Unprotect the PTE */
        TempPte.u.Hard.Valid = 1;
        TempPte.u.Soft.Prototype = 0;
        MI_WRITE_VALID_PTE(PointerPte, TempPte);

        /* One more page */
        if (++UnprotectedPages == PageCount) break;

        /* Capture the next PTE */
        TempPte = *(++PointerPte);
    }

    /* Return whether any pages were unprotected */
    return UnprotectedPages ? TRUE : FALSE;
}
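
/*
 * NOTE: Callers pass PageCount = 0 to mean "unprotect until the first PTE
 * that no longer matches the protection pattern": the early-out above only
 * triggers once the counter reaches a nonzero PageCount, so a zero count
 * simply runs until the pattern ends.
 */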

FORCEINLINE
VOID
MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links,
                              OUT PVOID* PoolFlink,
                              OUT PVOID* PoolBlink)
{
    BOOLEAN Safe;
    PVOID PoolVa;

    /* Initialize variables */
    *PoolFlink = *PoolBlink = NULL;

    /* Check if the list has entries */
    if (IsListEmpty(Links) == FALSE)
    {
        /* We are going to need the forward link to do an insert */
        PoolVa = Links->Flink;

        /* So make it safe to access */
        Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
        if (Safe) *PoolFlink = PoolVa;
    }

    /* Are we going to need a backward link too? */
    if (Links != Links->Blink)
    {
        /* Get the head's backward link for the insert */
        PoolVa = Links->Blink;

        /* Make it safe to access */
        Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
        if (Safe) *PoolBlink = PoolVa;
    }
}
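
/*
 * NOTE: A doubly-linked list insert or remove only ever writes the
 * Flink/Blink fields of the two neighboring entries, so unprotecting just
 * the pages holding the head's forward and backward links is enough for
 * the list operation; the rest of the freed pool stays protected.
 */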

FORCEINLINE
VOID
MiProtectedPoolProtectLinks(IN PVOID PoolFlink,
                            IN PVOID PoolBlink)
{
    /* Reprotect the pages, if they got unprotected earlier */
    if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
    if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
}

VOID
NTAPI
MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead,
                          IN PLIST_ENTRY Entry,
                          IN BOOLEAN Critical)
{
    PVOID PoolFlink, PoolBlink;

    /* Make the list accessible */
    MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);

    /* Now insert in the right position */
    Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);

    /* And reprotect the pages containing the free links */
    MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
}

VOID
NTAPI
MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
{
    PVOID PoolFlink, PoolBlink;

    /* Make the list accessible */
    MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);

    /* Now remove */
    RemoveEntryList(Entry);

    /* And reprotect the pages containing the free links */
    if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
    if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
}

INIT_FUNCTION
VOID
NTAPI
MiInitializeNonPagedPoolThresholds(VOID)
{
    PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;

    /* Default low threshold of 8MB or one third of nonpaged pool */
    MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
    MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);

    /* Default high threshold of 20MB or 50% */
    MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
    MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
    ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
}

INIT_FUNCTION
VOID
NTAPI
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePoolInPages;

    /* Lock paged pool */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* The total size of the paged pool, minus the allocated size, is free */
    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* Check the initial high state */
    if (FreePoolInPages >= MiHighPagedPoolThreshold)
    {
        /* We have plenty of pool */
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't */
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Check the initial low state */
    if (FreePoolInPages <= MiLowPagedPoolThreshold)
    {
        /* We're very low on free pool memory */
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We're not */
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Release the paged pool lock */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Now it's time for the nonpaged pool lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages are the maximum minus what's been allocated */
    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Check if we have plenty */
    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear the event */
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Check if we have very little */
    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear it */
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* We're done, release the nonpaged pool lock */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}
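
/*
 * NOTE: These presumably back the \KernelObjects\{Low,High}{Paged,NonPaged}PoolCondition
 * notification events that drivers can open and wait on; this routine only
 * computes their initial signaled state from the current free-page counts.
 */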

INIT_FUNCTION
VOID
NTAPI
MiInitializeNonPagedPool(VOID)
{
    ULONG i;
    PFN_COUNT PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // Initialize the pool S-LISTs as well as their maximum count. In general,
    // we'll allow 8 times the default on a 2GB system, and two times the default
    // on a 1GB system.
    //
    InitializeSListHead(&MiPagedPoolSListHead);
    InitializeSListHead(&MiNonPagedPoolSListHead);
    if (MmNumberOfPhysicalPages >= ((2 * _1GB) / PAGE_SIZE))
    {
        MiNonPagedPoolSListMaximum *= 8;
        MiPagedPoolSListMaximum *= 8;
    }
    else if (MmNumberOfPhysicalPages >= (_1GB / PAGE_SIZE))
    {
        MiNonPagedPoolSListMaximum *= 2;
        MiPagedPoolSListMaximum *= 2;
    }

    //
    // However, if debugging options for the pool are enabled, turn off the S-LIST
    // to reduce the risk of messing things up even more
    //
    if (MmProtectFreedNonPagedPool)
    {
        MiNonPagedPoolSListMaximum = 0;
        MiPagedPoolSListMaximum = 0;
    }

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = (PFN_COUNT)BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
        FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    }

    //
    // Validate and remember the first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where the initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember the last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge = (PFN_COUNT)
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top, so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact that our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}
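
/*
 * NOTE: The free-list bookkeeping lives inside the free pages themselves:
 * every free page starts with an MMFREE_POOL_ENTRY whose Owner field points
 * at the first page of its contiguous run, and only that first page is
 * linked into one of the MmNonPagedPoolFreeListHead buckets (indexed by
 * run size). This is what lets the free path below inspect the pages
 * around an allocation and follow Owner to find the run to merge with.
 */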

POOL_TYPE
NTAPI
MmDeterminePoolType(IN PVOID PoolAddress)
{
    //
    // Use a simple bounds check
    //
    if (PoolAddress >= MmPagedPoolStart && PoolAddress <= MmPagedPoolEnd)
        return PagedPool;
    else if (PoolAddress >= MmNonPagedPoolStart && PoolAddress <= MmNonPagedPoolEnd)
        return NonPagedPool;
    KeBugCheckEx(BAD_POOL_CALLER, 0x42, (ULONG_PTR)PoolAddress, 0, 0);
}

PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow (a huge SizeInBytes wraps the page count to zero)
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Compute how many page tables the expansion needs
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiPteToPde(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                             MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = MiAcquirePfnLock();
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                /* Initialize the PFN entry for it */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               PFN_FROM_PTE(MiAddressToPte(PointerPde)));

                /* Write the actual PDE now */
                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);
#endif

                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            MiReleasePfnLock(OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Get the first PTE and the one past the end, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand-zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }
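
    /*
     * NOTE: Paged pool allocations are handed out as demand-zero PTEs: no
     * physical pages are committed above. The first touch of each page
     * faults in a zeroed page, which is also what makes these allocations
     * pageable.
     */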

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed nonpaged pool protection enabled? */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed nonpaged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for it
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed nonpaged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);
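
    /*
     * NOTE: The allocation above is carved from the tail of the free run
     * (BaseVa = FreeEntry + remaining size in pages), so the
     * MMFREE_POOL_ENTRY heading the run stays in place and at most needs
     * to move to the bucket matching its reduced size.
     */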

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    MiAcquirePfnLockAtDpcLevel();

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    MiReleasePfnLockFromDpcLevel();
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
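
/*
 * NOTE: Nonpaged pool keeps no per-allocation header; the only bookkeeping
 * is the StartOfAllocation/EndOfAllocation bits set in the PFN entries
 * above. MiFreePoolPages below recovers an allocation's size by walking
 * PTEs from the start page until it finds the EndOfAllocation bit.
 */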

ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_COUNT FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;
    ULONG_PTR Offset;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart;
        i = (ULONG)(Offset >> PAGE_SHIFT);
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans. If it's
        // only one page, add it to the S-LIST instead of freeing it
        //
        NumberOfPages = End - i + 1;
        if ((NumberOfPages == 1) &&
            (ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum))
        {
            InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa);
            return 1;
        }

        /* Delete the actual pages */
        PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
        FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
        ASSERT(FreePages == NumberOfPages);

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the allocation and free bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }
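
    /*
     * NOTE: Paged pool needs two bitmaps because the allocation map alone
     * cannot tell where one allocation ends and the next begins:
     * PagedPoolAllocationMap tracks which pages are in use, while
     * EndOfPagedPoolBitmap marks the last page of each allocation, which
     * is what the End scan above relies on.
     */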

    //
    // Get the first PTE and its corresponding PFN entry. If this is also the
    // last PTE, meaning that this allocation was only for one page, push it into
    // the S-LIST instead of freeing it
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    if ((Pfn1->u3.e1.EndOfAllocation == 1) &&
        (ExQueryDepthSList(&MiNonPagedPoolSListHead) < MiNonPagedPoolSListMaximum))
    {
        InterlockedPushEntrySList(&MiNonPagedPoolSListHead, StartingVa);
        return 1;
    }

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = (PFN_COUNT)(PointerPte - StartPte + 1);

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PFN entries as no longer part of an allocation
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        /* Sanity check */
        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
        }

        //
        // Otherwise, our entire allocation must've fit within the initial non
        // paged pool, or the expansion nonpaged pool, so get the PFN entry of
        // the next allocation
        //
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if the page after ours begins a new allocation
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // It doesn't, so it must be a free run: locate its entry descriptor
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
        ASSERT(FreeEntry->Owner == FreeEntry);

        /* Consume this entry's pages */
        FreePages += FreeEntry->Size;

        /* Remove the item from the list, depending if pool is protected */
        if (MmProtectFreedNonPagedPool)
            MiProtectedPoolRemoveEntryList(&FreeEntry->List);
        else
            RemoveEntryList(&FreeEntry->List);
    }

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
        }

        /* Check if this is valid pool, or a guard page */
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation,
    // and then check if that page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
        FreeEntry = FreeEntry->Owner;

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(FreeEntry, 0);
        }

        //
        // Check if the entry is small enough to be indexed on a free list.
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
        {
            /* Remove the item from the list, depending if pool is protected */
            if (MmProtectFreedNonPagedPool)
                MiProtectedPoolRemoveEntryList(&FreeEntry->List);
            else
                RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = (ULONG)(FreeEntry->Size - 1);
            if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

            /* Insert the entry into the free list head, check for prot. pool */
            if (MmProtectFreedNonPagedPool)
                MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
            else
                InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }

    //
    // Check if we were unable to do any compaction; in that case, we stick
    // with our own entry
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages; at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = FreeEntry->Size - 1;
        if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

        /* Insert the entry into the free list head, check for prot. pool */
        if (MmProtectFreedNonPagedPool)
            MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
        else
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Get all the pages between our allocation and its end. These will all now
    // become free page chunks.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);

    /* Is freed nonpaged pool protected? */
    if (MmProtectFreedNonPagedPool)
    {
        /* Protect the freed pool! */
        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
    }

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}
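
/*
 * NOTE: If a backward merge happened above, FreeEntry was redirected to the
 * Owner of the preceding free run, so the FreeEntry == StartingVa test
 * detects the "no backward merge" case. Either way, the final loop restamps
 * every page of the resulting run with the Owner pointer and signature that
 * the allocation and merge paths expect to find.
 */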

BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}

NTSTATUS
NTAPI
MiInitializeSessionPool(VOID)
{
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde, LastPde;
    PFN_NUMBER PageFrameIndex, PdeCount;
    PPOOL_DESCRIPTOR PoolDescriptor;
    PMM_SESSION_SPACE SessionGlobal;
    PMM_PAGED_POOL_INFO PagedPoolInfo;
    NTSTATUS Status;
    ULONG Index, PoolSize, BitmapSize;
    PAGED_CODE();

    /* Get the session global and initialize its pool lock */
    SessionGlobal = MmSessionSpace->GlobalVirtualAddress;
    KeInitializeGuardedMutex(&SessionGlobal->PagedPoolMutex);

    /* Setup a valid pool descriptor */
    PoolDescriptor = &MmSessionSpace->PagedPool;
    ExInitializePoolDescriptor(PoolDescriptor,
                               PagedPoolSession,
                               0,
                               0,
                               &SessionGlobal->PagedPoolMutex);

    /* Setup the pool addresses */
    MmSessionSpace->PagedPoolStart = (PVOID)MiSessionPoolStart;
    MmSessionSpace->PagedPoolEnd = (PVOID)((ULONG_PTR)MiSessionPoolEnd - 1);
    DPRINT1("Session Pool Start: 0x%p End: 0x%p\n",
            MmSessionSpace->PagedPoolStart, MmSessionSpace->PagedPoolEnd);

    /* Reset all the counters */
    PagedPoolInfo = &MmSessionSpace->PagedPoolInfo;
    PagedPoolInfo->PagedPoolCommit = 0;
    PagedPoolInfo->PagedPoolHint = 0;
    PagedPoolInfo->AllocatedPagedPool = 0;

    /* Compute PDE and PTE addresses */
    PointerPde = MiAddressToPde(MmSessionSpace->PagedPoolStart);
    PointerPte = MiAddressToPte(MmSessionSpace->PagedPoolStart);
    LastPde = MiAddressToPde(MmSessionSpace->PagedPoolEnd);
    LastPte = MiAddressToPte(MmSessionSpace->PagedPoolEnd);

    /* Write them down */
    MmSessionSpace->PagedPoolBasePde = PointerPde;
    PagedPoolInfo->FirstPteForPagedPool = PointerPte;
    PagedPoolInfo->LastPteForPagedPool = LastPte;
    PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1;

    /* Zero the PDEs */
    PdeCount = LastPde - PointerPde;
    RtlZeroMemory(PointerPde, (PdeCount + 1) * sizeof(MMPTE));

    /* Initialize the PFN for the PDE */
    Status = MiInitializeAndChargePfn(&PageFrameIndex,
                                      PointerPde,
                                      MmSessionSpace->SessionPageDirectoryIndex,
                                      TRUE);
    ASSERT(NT_SUCCESS(Status) == TRUE);

    /* Initialize the first page table */
    Index = (ULONG_PTR)MmSessionSpace->PagedPoolStart - (ULONG_PTR)MmSessionBase;
    Index >>= 22;
#ifndef _M_AMD64 // FIXME
    ASSERT(MmSessionSpace->PageTables[Index].u.Long == 0);
    MmSessionSpace->PageTables[Index] = *PointerPde;
#endif

    /* Bump up counters */
    InterlockedIncrementSizeT(&MmSessionSpace->NonPageablePages);
    InterlockedIncrementSizeT(&MmSessionSpace->CommittedPages);

    /* Compute the size of the pool in pages, and of the bitmap for it */
    PoolSize = MmSessionPoolSize >> PAGE_SHIFT;
    BitmapSize = sizeof(RTL_BITMAP) + ((PoolSize + 31) / 32) * sizeof(ULONG);

    /* Allocate and initialize the bitmap that tracks allocations */
    PagedPoolInfo->PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                  BitmapSize,
                                                                  TAG_MM);
    ASSERT(PagedPoolInfo->PagedPoolAllocationMap != NULL);
    RtlInitializeBitMap(PagedPoolInfo->PagedPoolAllocationMap,
                        (PULONG)(PagedPoolInfo->PagedPoolAllocationMap + 1),
                        PoolSize);

    /* Set all bits, but clear the first page table's worth */
    RtlSetAllBits(PagedPoolInfo->PagedPoolAllocationMap);
    RtlClearBits(PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE);

    /* Allocate and initialize the bitmap that tracks allocation ends */
    PagedPoolInfo->EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                BitmapSize,
                                                                TAG_MM);
    ASSERT(PagedPoolInfo->EndOfPagedPoolBitmap != NULL);
    RtlInitializeBitMap(PagedPoolInfo->EndOfPagedPoolBitmap,
                        (PULONG)(PagedPoolInfo->EndOfPagedPoolBitmap + 1),
                        PoolSize);

    /* Clear all the bits and return success */
    RtlClearAllBits(PagedPoolInfo->EndOfPagedPoolBitmap);
    return STATUS_SUCCESS;
}
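
/*
 * NOTE: Each bitmap is allocated as a single block: the RTL_BITMAP header
 * first, immediately followed by its buffer, which is why BitmapSize adds
 * sizeof(RTL_BITMAP) and the buffer pointer is "header + 1".
 */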

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    UNIMPLEMENTED;
}

/* EOF */