/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/special.c
 * PURPOSE:         ARM Memory Manager Special Pool implementation
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* Reference: http://msdn.microsoft.com/en-us/library/ff551832(v=VS.85).aspx */
14 /* INCLUDES *******************************************************************/
20 #define MODULE_INVOLVED_IN_ARM3
21 #include <mm/ARM3/miarm.h>
/* Pool support flags maintained by the executive pool code */
extern ULONG ExpPoolFlags;
/* Base of the system PTE region; free-list links are stored relative to it */
extern PMMPTE MmSystemPteBase;

/* NOTE(review): this forward declaration is truncated in this chunk — the
   return type and the trailing alignment parameter are not visible here. */
MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
                           IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,

/* GLOBALS ********************************************************************/

/* Markers kept in a guard/free PTE's Soft.PageFileHigh field */
#define SPECIAL_POOL_PAGED_PTE    0x2000
#define SPECIAL_POOL_NONPAGED_PTE 0x4000
/* Flag OR'ed into POOL_HEADER.Ulong1 for paged special-pool allocations */
#define SPECIAL_POOL_PAGED        0x8000

/* Virtual address bounds of the special pool region */
PVOID MmSpecialPoolStart;
PVOID MmSpecialPoolEnd;
/* Expansion reserve: first unused PTE and the count of PTEs remaining in it */
PVOID MiSpecialPoolExtra;
ULONG MiSpecialPoolExtraCount;

/* Head and tail of the free list of PTE pairs (data page + guard page) */
PMMPTE MiSpecialPoolFirstPte;
PMMPTE MiSpecialPoolLastPte;

/* Usage counters and their historical peaks */
PFN_COUNT MmSpecialPagesInUse;
PFN_COUNT MmSpecialPagesInUsePeak;
PFN_COUNT MiSpecialPagesPagable;
PFN_COUNT MiSpecialPagesPagablePeak;
PFN_COUNT MiSpecialPagesNonPaged;
PFN_COUNT MiSpecialPagesNonPagedPeak;
/* Upper bound on the number of nonpaged special-pool pages */
PFN_COUNT MiSpecialPagesNonPagedMaximum;

/* TRUE: place data at the end of the page (catch overruns);
   FALSE: place data at the start of the page (catch underruns) */
BOOLEAN MmSpecialPoolCatchOverruns = TRUE;

/* Record written over a freed special-pool page to aid use-after-free
   debugging. NOTE(review): several members (Signature, TickCount, Pagable,
   VirtualAddress, Thread, StackPointer, StackBytes — all referenced by
   MmFreeSpecialPool) are elided from this chunk. */
typedef struct _MI_FREED_SPECIAL_POOL
    POOL_HEADER OverlaidPoolHeader;
    /* TODO: Add overlaid verifier pool header */
    ULONG NumberOfBytesRequested;
    UCHAR StackData[0x400];
} MI_FREED_SPECIAL_POOL, *PMI_FREED_SPECIAL_POOL;

/* PRIVATE FUNCTIONS **********************************************************/

VOID NTAPI MiTestSpecialPool(VOID);
77 MmUseSpecialPool(SIZE_T NumberOfBytes
, ULONG Tag
)
79 /* Special pool is not suitable for allocations bigger than 1 page */
80 if (NumberOfBytes
> (PAGE_SIZE
- sizeof(POOL_HEADER
)))
85 if (MmSpecialPoolTag
== '*')
90 return Tag
== MmSpecialPoolTag
;
95 MmIsSpecialPoolAddress(PVOID P
)
97 return ((P
>= MmSpecialPoolStart
) &&
98 (P
<= MmSpecialPoolEnd
));
103 MmIsSpecialPoolAddressFree(PVOID P
)
107 ASSERT(MmIsSpecialPoolAddress(P
));
108 PointerPte
= MiAddressToPte(P
);
110 if (PointerPte
->u
.Soft
.PageFileHigh
== SPECIAL_POOL_PAGED_PTE
||
111 PointerPte
->u
.Soft
.PageFileHigh
== SPECIAL_POOL_NONPAGED_PTE
)
123 MiInitializeSpecialPool(VOID
)
125 ULONG SpecialPoolPtes
, i
;
128 /* Check if there is a special pool tag */
129 if ((MmSpecialPoolTag
== 0) ||
130 (MmSpecialPoolTag
== -1)) return;
132 /* Calculate number of system PTEs for the special pool */
133 if (MmNumberOfSystemPtes
>= 0x3000)
134 SpecialPoolPtes
= MmNumberOfSystemPtes
/ 3;
136 SpecialPoolPtes
= MmNumberOfSystemPtes
/ 6;
138 /* Don't let the number go too high */
139 if (SpecialPoolPtes
> 0x6000) SpecialPoolPtes
= 0x6000;
141 /* Round up to the page size */
142 SpecialPoolPtes
= PAGE_ROUND_UP(SpecialPoolPtes
);
144 ASSERT((SpecialPoolPtes
& (PTE_PER_PAGE
- 1)) == 0);
146 /* Reserve those PTEs */
149 PointerPte
= MiReserveAlignedSystemPtes(SpecialPoolPtes
,
151 /*0x400000*/0); // FIXME:
152 if (PointerPte
) break;
154 /* Reserving didn't work, so try to reduce the requested size */
155 ASSERT(SpecialPoolPtes
>= PTE_PER_PAGE
);
156 SpecialPoolPtes
-= PTE_PER_PAGE
;
157 } while (SpecialPoolPtes
);
159 /* Fail if we couldn't reserve them at all */
160 if (!SpecialPoolPtes
) return;
162 /* Make sure we got enough */
163 ASSERT(SpecialPoolPtes
>= PTE_PER_PAGE
);
165 /* Save first PTE and its address */
166 MiSpecialPoolFirstPte
= PointerPte
;
167 MmSpecialPoolStart
= MiPteToAddress(PointerPte
);
169 for (i
= 0; i
< PTE_PER_PAGE
/ 2; i
++)
171 /* Point it to the next entry */
172 PointerPte
->u
.List
.NextEntry
= &PointerPte
[2] - MmSystemPteBase
;
174 /* Move to the next pair */
178 /* Save extra values */
179 MiSpecialPoolExtra
= PointerPte
;
180 MiSpecialPoolExtraCount
= SpecialPoolPtes
- PTE_PER_PAGE
;
182 /* Mark the previous PTE as the last one */
183 MiSpecialPoolLastPte
= PointerPte
- 2;
184 MiSpecialPoolLastPte
->u
.List
.NextEntry
= MM_EMPTY_PTE_LIST
;
186 /* Save end address of the special pool */
187 MmSpecialPoolEnd
= MiPteToAddress(MiSpecialPoolLastPte
+ 1);
189 /* Calculate maximum non-paged part of the special pool */
190 MiSpecialPagesNonPagedMaximum
= MmResidentAvailablePages
>> 4;
192 /* And limit it if it turned out to be too big */
193 if (MmNumberOfPhysicalPages
> 0x3FFF)
194 MiSpecialPagesNonPagedMaximum
= MmResidentAvailablePages
>> 3;
196 DPRINT1("Special pool start %p - end %p\n", MmSpecialPoolStart
, MmSpecialPoolEnd
);
197 ExpPoolFlags
|= POOL_FLAG_SPECIAL_POOL
;
199 //MiTestSpecialPool();
204 MmExpandSpecialPool(VOID
)
209 MI_ASSERT_PFN_LOCK_HELD();
211 if (MiSpecialPoolExtraCount
== 0)
212 return STATUS_INSUFFICIENT_RESOURCES
;
214 PointerPte
= MiSpecialPoolExtra
;
215 ASSERT(MiSpecialPoolFirstPte
== MiSpecialPoolLastPte
);
216 ASSERT(MiSpecialPoolFirstPte
->u
.List
.NextEntry
== MM_EMPTY_PTE_LIST
);
217 MiSpecialPoolFirstPte
->u
.List
.NextEntry
= PointerPte
- MmSystemPteBase
;
219 ASSERT(MiSpecialPoolExtraCount
>= PTE_PER_PAGE
);
220 for (i
= 0; i
< PTE_PER_PAGE
/ 2; i
++)
222 /* Point it to the next entry */
223 PointerPte
->u
.List
.NextEntry
= &PointerPte
[2] - MmSystemPteBase
;
225 /* Move to the next pair */
229 /* Save remaining extra values */
230 MiSpecialPoolExtra
= PointerPte
;
231 MiSpecialPoolExtraCount
-= PTE_PER_PAGE
;
233 /* Mark the previous PTE as the last one */
234 MiSpecialPoolLastPte
= PointerPte
- 2;
235 MiSpecialPoolLastPte
->u
.List
.NextEntry
= MM_EMPTY_PTE_LIST
;
237 /* Save new end address of the special pool */
238 MmSpecialPoolEnd
= MiPteToAddress(MiSpecialPoolLastPte
+ 1);
240 return STATUS_SUCCESS
;
/*
 * MmAllocateSpecialPool — serves a sub-page allocation from special pool:
 * takes a (data, guard) PTE pair off the free list, backs the data page with
 * a physical page, fills it with a tick-count pattern, and places the user
 * buffer at the start or end of the page depending on whether underruns or
 * overruns are being caught.
 *
 * NOTE(review): this chunk is missing several original lines (return type,
 * braces, KeBugCheckEx arguments, some declarations and return statements);
 * only comments were changed here.
 */
MmAllocateSpecialPool(SIZE_T NumberOfBytes, ULONG Tag, POOL_TYPE PoolType, ULONG SpecialType)

    /* Template for the data page's valid PTE */
    MMPTE TempPte = ValidKernelPte;
    PFN_NUMBER PageFrameNumber;
    LARGE_INTEGER TickCount;
    PFN_COUNT PagesInUse;

    DPRINT("MmAllocateSpecialPool(%x %x %x %x)\n", NumberOfBytes, Tag, PoolType, SpecialType);

    /* Check if the pool is initialized and quit if it's not */
    if (!MiSpecialPoolFirstPte) return NULL;

    /* Reduce the pool type to its base (paged vs nonpaged) */
    PoolType &= BASE_POOL_TYPE_MASK;

    /* Check whether current IRQL matches the pool type */
    Irql = KeGetCurrentIrql();

    if (((PoolType == PagedPool) && (Irql > APC_LEVEL)) ||
        ((PoolType != PagedPool) && (Irql > DISPATCH_LEVEL)))

        /* Illegal IRQL for this pool type: fatal
           (bugcheck arguments elided in this chunk) */
        KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,

    /* Some allocations from Mm must never use special pool */

        /* Reject and let normal pool handle it */

    /* TODO: Take into account various limitations */

    /* Heed the maximum limit of nonpaged pages */
    if ((PoolType == NonPagedPool) &&
        (MiSpecialPagesNonPaged > MiSpecialPagesNonPagedMaximum))

    /* Lock PFN database */
    Irql = MiAcquirePfnLock();

    /* Reject allocation in case amount of available pages is too small */
    if (MmAvailablePages < 0x100)

        /* Release the PFN database lock */
        MiReleasePfnLock(Irql);
        DPRINT1("Special pool: MmAvailablePages 0x%x is too small\n", MmAvailablePages);

    /* Check if special pool PTE list is exhausted */
    if (MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST)

        /* Try to expand it */
        if (!NT_SUCCESS(MmExpandSpecialPool()))

            /* No reserves left, reject this allocation */
            MiReleasePfnLock(Irql);
            /* 'once' throttles the diagnostic to the first failure;
               its declaration is elided in this chunk */
            if (!once++) DPRINT1("Special pool: No PTEs left!\n");

    ASSERT(MiSpecialPoolFirstPte->u.List.NextEntry != MM_EMPTY_PTE_LIST);

    /* Save allocation time (low byte doubles as the fill pattern) */
    KeQueryTickCount(&TickCount);

    /* Get a pointer to the first PTE */
    PointerPte = MiSpecialPoolFirstPte;

    /* Set the first PTE pointer to the next one in the list */
    MiSpecialPoolFirstPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

    /* Allocate a physical page, attributing it to the right usage class */
    if (PoolType == PagedPool)

        MI_SET_USAGE(MI_USAGE_PAGED_POOL);

        MI_SET_USAGE(MI_USAGE_NONPAGED_POOL);

    MI_SET_PROCESS2("Kernel-Special");
    PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

    /* Initialize PFN and make it valid */
    TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
    MiInitializePfnAndMakePteValid(PageFrameNumber, PointerPte, TempPte);

    /* Release the PFN database lock */
    MiReleasePfnLock(Irql);

    /* Increase page counter and track the peak */
    PagesInUse = InterlockedIncrementUL(&MmSpecialPagesInUse);
    if (PagesInUse > MmSpecialPagesInUsePeak)
        MmSpecialPagesInUsePeak = PagesInUse;

    /* Put some content into the page. Low value of tick count would do */
    Entry = MiPteToAddress(PointerPte);
    RtlFillMemory(Entry, PAGE_SIZE, TickCount.LowPart);

    /* Calculate header and entry addresses */
    if ((SpecialType != 0) &&
        ((SpecialType == 1) || (!MmSpecialPoolCatchOverruns)))

        /* We catch underruns. Data is at the beginning of the page,
           header at the end */
        Header = (PPOOL_HEADER)((PUCHAR)Entry + PAGE_SIZE - sizeof(POOL_HEADER));

        /* We catch overruns. Data is at the end of the page, aligned down
           to the pool-header granularity; header at the start */
        Header = (PPOOL_HEADER)Entry;
        Entry = (PVOID)((ULONG_PTR)((PUCHAR)Entry - NumberOfBytes + PAGE_SIZE) & ~((LONG_PTR)sizeof(POOL_HEADER) - 1));

    /* Initialize the header */
    RtlZeroMemory(Header, sizeof(POOL_HEADER));

    /* Save allocation size there */
    Header->Ulong1 = (ULONG)NumberOfBytes;

    /* Make sure it's all good */
    ASSERT((NumberOfBytes <= PAGE_SIZE - sizeof(POOL_HEADER)) &&
           (PAGE_SIZE <= 32 * 1024));

    /* Mark it as paged or nonpaged */
    if (PoolType == PagedPool)

        /* Add pagedpool flag into the pool header too */
        Header->Ulong1 |= SPECIAL_POOL_PAGED;

        /* Also mark the next (guard) PTE as special-pool-paged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_PAGED_PTE;

        /* Increase pagable counter and track the peak */
        PagesInUse = InterlockedIncrementUL(&MiSpecialPagesPagable);
        if (PagesInUse > MiSpecialPagesPagablePeak)
            MiSpecialPagesPagablePeak = PagesInUse;

        /* Mark the next (guard) PTE as special-pool-nonpaged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_NONPAGED_PTE;

        /* Increase nonpaged counter and track the peak */
        PagesInUse = InterlockedIncrementUL(&MiSpecialPagesNonPaged);
        if (PagesInUse > MiSpecialPagesNonPagedPeak)
            MiSpecialPagesNonPagedPeak = PagesInUse;

    /* Finally save tag and put allocation time into the header's blocksize.
       That time will be used to check memory consistency within the allocated
       page (it is the fill byte MiSpecialPoolCheckPattern verifies) */
    Header->PoolTag = Tag;
    Header->BlockSize = (UCHAR)TickCount.LowPart;
    DPRINT("%p\n", Entry);
/*
 * MiSpecialPoolCheckPattern — verifies that the fill pattern between the end
 * of the user buffer and the end of its page is intact, bugchecking with
 * SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION on the first mismatching byte.
 *
 * NOTE(review): braces and some KeBugCheckEx arguments are elided in this
 * chunk; only comments were changed here.
 */
MiSpecialPoolCheckPattern(PUCHAR P, PPOOL_HEADER Header)

    ULONG BytesToCheck, BytesRequested, Index;

    /* Get amount of bytes user requested to be allocated by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;
    ASSERT(BytesRequested <= PAGE_SIZE - sizeof(POOL_HEADER));

    /* Get a pointer to the end of user's area */
    Ptr = P + BytesRequested;

    /* Calculate how many bytes to check (from there to the end of the page) */
    BytesToCheck = (ULONG)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - Ptr);

    /* Remove pool header size if we're catching underruns */
    if (((ULONG_PTR)P & (PAGE_SIZE - 1)) == 0)

        /* User buffer is located in the beginning of the page, so the
           header sits at the end and must not be pattern-checked */
        BytesToCheck -= sizeof(POOL_HEADER);

    /* Check the pattern after user buffer; Header->BlockSize holds the fill
       byte (low byte of the allocation-time tick count) */
    for (Index = 0; Index < BytesToCheck; Index++)

        /* Bugcheck if bytes don't match
           (some bugcheck arguments elided in this chunk) */
        if (Ptr[Index] != Header->BlockSize)

            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)&Ptr[Index],
/*
 * MmFreeSpecialPool — frees a special-pool allocation: validates the PTE and
 * IRQL, verifies the fill pattern around the user buffer, overwrites the page
 * with an MI_FREED_SPECIAL_POOL record for debugging, releases the physical
 * page, and links the PTE pair back onto the tail of the free list.
 *
 * NOTE(review): this chunk is missing several original lines (return type,
 * braces, KeBugCheckEx argument lists, the code that sets 'Overruns', and
 * some declarations); only comments were changed here.
 */
MmFreeSpecialPool(PVOID P)

    BOOLEAN Overruns = FALSE;
    KIRQL Irql = KeGetCurrentIrql();
    ULONG BytesRequested, BytesReal = 0;
    PMI_FREED_SPECIAL_POOL FreedHeader;
    LARGE_INTEGER TickCount;

    DPRINT("MmFreeSpecialPool(%p)\n", P);

    PointerPte = MiAddressToPte(P);

    /* Check if it's valid */
    if (PointerPte->u.Hard.Valid == 0)

        /* Bugcheck if it has NOACCESS or 0 set as protection
           (likely a double free or a bogus pointer; bugcheck
           arguments partially elided in this chunk) */
        if (PointerPte->u.Soft.Protection == MM_NOACCESS ||
            !PointerPte->u.Soft.Protection)

            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)PointerPte,

    /* Determine if it's a underruns or overruns pool pointer:
       page-aligned pointers come from underrun-catching allocations */
    PtrOffset = (ULONG)((ULONG_PTR)P & (PAGE_SIZE - 1));

        /* Pool catches overruns: header at the start of the page */
        Header = PAGE_ALIGN(P);

        /* Pool catches underruns: header at the end of the page */
        Header = (PPOOL_HEADER)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - sizeof(POOL_HEADER));

    /* Check if it's non paged pool */
    if ((Header->Ulong1 & SPECIAL_POOL_PAGED) == 0)

        /* Non-paged allocation, ensure that IRQ is not higher that DISPATCH */
        PoolType = NonPagedPool;
        ASSERT(PointerPte[1].u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE);
        if (Irql > DISPATCH_LEVEL)

            /* Bugcheck arguments elided in this chunk */
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,

        /* Paged allocation, ensure IRQL is not above APC */
        PoolType = PagedPool;
        ASSERT(PointerPte[1].u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE);
        if (Irql > APC_LEVEL)

            /* Bugcheck arguments elided in this chunk */
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,

    /* Get amount of bytes user requested to be allocated by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;
    ASSERT(BytesRequested <= PAGE_SIZE - sizeof(POOL_HEADER));

    /* Check memory before the allocated user buffer in case of overruns detection */

        /* Calculate the real placement of the buffer */
        BytesReal = PAGE_SIZE - PtrOffset;

        /* If they mismatch, it's unrecoverable
           (bugcheck arguments elided in this chunk) */
        if (BytesRequested > BytesReal)

            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,

        if (BytesRequested + sizeof(POOL_HEADER) < BytesReal)

            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,

        /* Actually check the memory pattern between the header and the
           user buffer (bugcheck arguments elided in this chunk) */
        for (b = (PUCHAR)(Header + 1); b < (PUCHAR)P; b++)

            if (*b != Header->BlockSize)

                KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,

    /* Check the memory pattern after the user buffer */
    MiSpecialPoolCheckPattern(P, Header);

    /* Fill the freed header so a later use-after-free can be diagnosed */
    KeQueryTickCount(&TickCount);
    FreedHeader = (PMI_FREED_SPECIAL_POOL)PAGE_ALIGN(P);
    FreedHeader->Signature = 0x98764321;
    FreedHeader->TickCount = TickCount.LowPart;
    FreedHeader->NumberOfBytesRequested = BytesRequested;
    FreedHeader->Pagable = PoolType;
    FreedHeader->VirtualAddress = P;
    FreedHeader->Thread = PsGetCurrentThread();
    /* TODO: Fill StackPointer and StackBytes */
    FreedHeader->StackPointer = NULL;
    FreedHeader->StackBytes = 0;

    if (PoolType == NonPagedPool)

        /* Non pagable. Get PFN element corresponding to the PTE */
        Pfn = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);

        /* Count the page as free */
        InterlockedDecrementUL(&MiSpecialPagesNonPaged);

        /* Lock PFN database */
        Irql = MiAcquirePfnLock();

        /* Delete this PFN */
        MI_SET_PFN_DELETED(Pfn);

        /* Decrement share count of this PFN */
        MiDecrementShareCount(Pfn, PointerPte->u.Hard.PageFrameNumber);

        MI_ERASE_PTE(PointerPte);

        //FIXME: Use KeFlushSingleTb() instead
        KeFlushEntireTb(TRUE, TRUE);

        /* Pagable. Delete that virtual address */
        MiDeleteSystemPageableVm(PointerPte, 1, 0, NULL);

        /* Count the page as free */
        InterlockedDecrementUL(&MiSpecialPagesPagable);

        /* Lock PFN database */
        Irql = MiAcquirePfnLock();

    /* Mark next (guard) PTE as invalid */
    MI_ERASE_PTE(PointerPte + 1);

    /* Make sure that the last entry is really the last one */
    ASSERT(MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);

    /* Update the current last PTE next pointer */
    MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    /* PointerPte becomes the new last PTE */
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
    MiSpecialPoolLastPte = PointerPte;

    /* Release the PFN database lock */
    MiReleasePfnLock(Irql);

    /* Update page counter */
    InterlockedDecrementUL(&MmSpecialPagesInUse);
/*
 * MiTestSpecialPool — ad-hoc smoke test for the special pool: exercises
 * allocate/free in a tight loop, then a batch of allocations freed
 * afterwards. Overrun/underrun fault tests are kept commented out.
 *
 * NOTE(review): declarations of i, ByteSize, p1 and p2 are elided in this
 * chunk; only comments were changed here.
 */
MiTestSpecialPool(VOID)

    POOL_TYPE PoolType = PagedPool;

    // First allocate/free
    for (i=0; i<100; i++)

        ByteSize = (100 * (i+1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p1 = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p1 %p size %lu\n", p1, ByteSize);
        MmFreeSpecialPool(p1);

    // Now allocate all at once, then free at once
    for (i=0; i<100; i++)

        ByteSize = (100 * (i+1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p2[i] = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        /* NOTE(review): this prints p1, not p2[i] — p2[i] was probably
           intended; confirm against the full source before changing */
        DPRINT1("p2[%lu] %p size %lu\n", i, p1, ByteSize);

    for (i=0; i<100; i++)

        DPRINT1("Freeing %p\n", p2[i]);
        MmFreeSpecialPool(p2[i]);

    // Overrun the buffer to test

    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 0);
    //p3[ByteSize] = 0xF1; // This should cause an exception

    // Underrun the buffer to test

    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 1);
    //*p3 = 0xF1; // This should cause an exception