/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     BSD - See COPYING.ARM in the top level directory
 * FILE:        ntoskrnl/mm/ARM3/special.c
 * PURPOSE:     ARM Memory Manager Special Pool implementation
 * PROGRAMMERS: ReactOS Portable Systems Group
 */

/*
    References:
    http://msdn.microsoft.com/en-us/library/ff551832(v=VS.85).aspx
*/
14 /* INCLUDES *******************************************************************/
20 #define MODULE_INVOLVED_IN_ARM3
21 #include <mm/ARM3/miarm.h>
23 extern ULONG ExpPoolFlags
;
24 extern PMMPTE MmSystemPteBase
;
28 MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes
,
29 IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
,
32 /* GLOBALS ********************************************************************/
34 #define SPECIAL_POOL_PAGED_PTE 0x2000
35 #define SPECIAL_POOL_NONPAGED_PTE 0x4000
36 #define SPECIAL_POOL_PAGED 0x8000
38 PVOID MmSpecialPoolStart
;
39 PVOID MmSpecialPoolEnd
;
40 PVOID MiSpecialPoolExtra
;
41 ULONG MiSpecialPoolExtraCount
;
43 PMMPTE MiSpecialPoolFirstPte
;
44 PMMPTE MiSpecialPoolLastPte
;
46 PFN_COUNT MmSpecialPagesInUse
;
47 PFN_COUNT MmSpecialPagesInUsePeak
;
48 PFN_COUNT MiSpecialPagesPagable
;
49 PFN_COUNT MiSpecialPagesPagablePeak
;
50 PFN_COUNT MiSpecialPagesNonPaged
;
51 PFN_COUNT MiSpecialPagesNonPagedPeak
;
52 PFN_COUNT MiSpecialPagesNonPagedMaximum
;
54 BOOLEAN MmSpecialPoolCatchOverruns
= TRUE
;
56 typedef struct _MI_FREED_SPECIAL_POOL
58 POOL_HEADER OverlaidPoolHeader
;
59 /* TODO: Add overlaid verifier pool header */
62 ULONG NumberOfBytesRequested
;
68 UCHAR StackData
[0x400];
69 } MI_FREED_SPECIAL_POOL
, *PMI_FREED_SPECIAL_POOL
;
71 /* PRIVATE FUNCTIONS **********************************************************/
73 VOID NTAPI
MiTestSpecialPool(VOID
);
77 MmUseSpecialPool(SIZE_T NumberOfBytes
, ULONG Tag
)
79 /* Special pool is not suitable for allocations bigger than 1 page */
80 if (NumberOfBytes
> (PAGE_SIZE
- sizeof(POOL_HEADER
)))
85 if (MmSpecialPoolTag
== '*')
90 return Tag
== MmSpecialPoolTag
;
95 MmIsSpecialPoolAddress(PVOID P
)
97 return ((P
>= MmSpecialPoolStart
) &&
98 (P
<= MmSpecialPoolEnd
));
103 MmIsSpecialPoolAddressFree(PVOID P
)
107 ASSERT(MmIsSpecialPoolAddress(P
));
108 PointerPte
= MiAddressToPte(P
);
110 if (PointerPte
->u
.Soft
.PageFileHigh
== SPECIAL_POOL_PAGED_PTE
||
111 PointerPte
->u
.Soft
.PageFileHigh
== SPECIAL_POOL_NONPAGED_PTE
)
123 MiInitializeSpecialPool(VOID
)
125 ULONG SpecialPoolPtes
, i
;
128 /* Check if there is a special pool tag */
129 if ((MmSpecialPoolTag
== 0) ||
130 (MmSpecialPoolTag
== -1)) return;
132 /* Calculate number of system PTEs for the special pool */
133 if (MmNumberOfSystemPtes
>= 0x3000)
134 SpecialPoolPtes
= MmNumberOfSystemPtes
/ 3;
136 SpecialPoolPtes
= MmNumberOfSystemPtes
/ 6;
138 /* Don't let the number go too high */
139 if (SpecialPoolPtes
> 0x6000) SpecialPoolPtes
= 0x6000;
141 /* Round up to the page size */
142 SpecialPoolPtes
= PAGE_ROUND_UP(SpecialPoolPtes
);
144 ASSERT((SpecialPoolPtes
& (PTE_PER_PAGE
- 1)) == 0);
146 /* Reserve those PTEs */
149 PointerPte
= MiReserveAlignedSystemPtes(SpecialPoolPtes
,
151 /*0x400000*/0); // FIXME:
152 if (PointerPte
) break;
154 /* Reserving didn't work, so try to reduce the requested size */
155 ASSERT(SpecialPoolPtes
>= PTE_PER_PAGE
);
156 SpecialPoolPtes
-= PTE_PER_PAGE
;
157 } while (SpecialPoolPtes
);
159 /* Fail if we couldn't reserve them at all */
160 if (!SpecialPoolPtes
) return;
162 /* Make sure we got enough */
163 ASSERT(SpecialPoolPtes
>= PTE_PER_PAGE
);
165 /* Save first PTE and its address */
166 MiSpecialPoolFirstPte
= PointerPte
;
167 MmSpecialPoolStart
= MiPteToAddress(PointerPte
);
169 for (i
= 0; i
< PTE_PER_PAGE
/ 2; i
++)
171 /* Point it to the next entry */
172 PointerPte
->u
.List
.NextEntry
= &PointerPte
[2] - MmSystemPteBase
;
174 /* Move to the next pair */
178 /* Save extra values */
179 MiSpecialPoolExtra
= PointerPte
;
180 MiSpecialPoolExtraCount
= SpecialPoolPtes
- PTE_PER_PAGE
;
182 /* Mark the previous PTE as the last one */
183 MiSpecialPoolLastPte
= PointerPte
- 2;
184 MiSpecialPoolLastPte
->u
.List
.NextEntry
= MM_EMPTY_PTE_LIST
;
186 /* Save end address of the special pool */
187 MmSpecialPoolEnd
= MiPteToAddress(MiSpecialPoolLastPte
+ 1);
189 /* Calculate maximum non-paged part of the special pool */
190 MiSpecialPagesNonPagedMaximum
= MmResidentAvailablePages
>> 4;
192 /* And limit it if it turned out to be too big */
193 if (MmNumberOfPhysicalPages
> 0x3FFF)
194 MiSpecialPagesNonPagedMaximum
= MmResidentAvailablePages
>> 3;
196 DPRINT1("Special pool start %p - end %p\n", MmSpecialPoolStart
, MmSpecialPoolEnd
);
197 ExpPoolFlags
|= POOL_FLAG_SPECIAL_POOL
;
199 //MiTestSpecialPool();
204 MmExpandSpecialPool(VOID
)
209 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL
);
211 if (MiSpecialPoolExtraCount
== 0)
212 return STATUS_INSUFFICIENT_RESOURCES
;
214 PointerPte
= MiSpecialPoolExtra
;
215 ASSERT(MiSpecialPoolFirstPte
== MiSpecialPoolLastPte
);
216 ASSERT(MiSpecialPoolFirstPte
->u
.List
.NextEntry
== MM_EMPTY_PTE_LIST
);
217 MiSpecialPoolFirstPte
->u
.List
.NextEntry
= PointerPte
- MmSystemPteBase
;
219 ASSERT(MiSpecialPoolExtraCount
>= PTE_PER_PAGE
);
220 for (i
= 0; i
< PTE_PER_PAGE
/ 2; i
++)
222 /* Point it to the next entry */
223 PointerPte
->u
.List
.NextEntry
= &PointerPte
[2] - MmSystemPteBase
;
225 /* Move to the next pair */
229 /* Save remaining extra values */
230 MiSpecialPoolExtra
= PointerPte
;
231 MiSpecialPoolExtraCount
-= PTE_PER_PAGE
;
233 /* Mark the previous PTE as the last one */
234 MiSpecialPoolLastPte
= PointerPte
- 2;
235 MiSpecialPoolLastPte
->u
.List
.NextEntry
= MM_EMPTY_PTE_LIST
;
237 /* Save new end address of the special pool */
238 MmSpecialPoolEnd
= MiPteToAddress(MiSpecialPoolLastPte
+ 1);
240 return STATUS_SUCCESS
;
245 MmAllocateSpecialPool(SIZE_T NumberOfBytes
, ULONG Tag
, POOL_TYPE PoolType
, ULONG SpecialType
)
248 MMPTE TempPte
= ValidKernelPte
;
250 PFN_NUMBER PageFrameNumber
;
251 LARGE_INTEGER TickCount
;
254 PFN_COUNT PagesInUse
;
256 DPRINT("MmAllocateSpecialPool(%x %x %x %x)\n", NumberOfBytes
, Tag
, PoolType
, SpecialType
);
258 /* Check if the pool is initialized and quit if it's not */
259 if (!MiSpecialPoolFirstPte
) return NULL
;
261 /* Get the pool type */
262 PoolType
&= BASE_POOL_TYPE_MASK
;
264 /* Check whether current IRQL matches the pool type */
265 Irql
= KeGetCurrentIrql();
267 if (((PoolType
== PagedPool
) && (Irql
> APC_LEVEL
)) ||
268 ((PoolType
!= PagedPool
) && (Irql
> DISPATCH_LEVEL
)))
271 KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION
,
278 /* TODO: Take into account various limitations */
280 /* Heed the maximum limit of nonpaged pages */
281 if ((PoolType
== NonPagedPool
) &&
282 (MiSpecialPagesNonPaged
> MiSpecialPagesNonPagedMaximum
))
287 /* Lock PFN database */
288 Irql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
290 /* Reject allocation in case amount of available pages is too small */
291 if (MmAvailablePages
< 0x100)
293 /* Release the PFN database lock */
294 KeReleaseQueuedSpinLock(LockQueuePfnLock
, Irql
);
295 DPRINT1("Special pool: MmAvailablePages 0x%x is too small\n", MmAvailablePages
);
299 /* Check if special pool PTE list is exhausted */
300 if (MiSpecialPoolFirstPte
->u
.List
.NextEntry
== MM_EMPTY_PTE_LIST
)
302 /* Try to expand it */
303 if (!NT_SUCCESS(MmExpandSpecialPool()))
305 /* No reserves left, reject this allocation */
307 KeReleaseQueuedSpinLock(LockQueuePfnLock
, Irql
);
308 if (!once
++) DPRINT1("Special pool: No PTEs left!\n");
311 ASSERT(MiSpecialPoolFirstPte
->u
.List
.NextEntry
!= MM_EMPTY_PTE_LIST
);
314 /* Save allocation time */
315 KeQueryTickCount(&TickCount
);
317 /* Get a pointer to the first PTE */
318 PointerPte
= MiSpecialPoolFirstPte
;
320 /* Set the first PTE pointer to the next one in the list */
321 MiSpecialPoolFirstPte
= MmSystemPteBase
+ PointerPte
->u
.List
.NextEntry
;
323 /* Allocate a physical page */
324 PageFrameNumber
= MiRemoveAnyPage(MI_GET_NEXT_COLOR());
326 /* Initialize PFN and make it valid */
327 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
328 MiInitializePfnAndMakePteValid(PageFrameNumber
, PointerPte
, TempPte
);
330 /* Release the PFN database lock */
331 KeReleaseQueuedSpinLock(LockQueuePfnLock
, Irql
);
333 /* Increase page counter */
334 PagesInUse
= InterlockedIncrementUL(&MmSpecialPagesInUse
);
335 if (PagesInUse
> MmSpecialPagesInUsePeak
)
336 MmSpecialPagesInUsePeak
= PagesInUse
;
338 /* Put some content into the page. Low value of tick count would do */
339 Entry
= MiPteToAddress(PointerPte
);
340 RtlFillMemory(Entry
, PAGE_SIZE
, TickCount
.LowPart
);
342 /* Calculate header and entry addresses */
343 if ((SpecialType
!= 0) &&
344 ((SpecialType
== 1) || (!MmSpecialPoolCatchOverruns
)))
346 /* We catch underruns. Data is at the beginning of the page */
347 Header
= (PPOOL_HEADER
)((PUCHAR
)Entry
+ PAGE_SIZE
- sizeof(POOL_HEADER
));
351 /* We catch overruns. Data is at the end of the page */
352 Header
= (PPOOL_HEADER
)Entry
;
353 Entry
= (PVOID
)((ULONG_PTR
)((PUCHAR
)Entry
- NumberOfBytes
+ PAGE_SIZE
) & ~((LONG_PTR
)sizeof(POOL_HEADER
) - 1));
356 /* Initialize the header */
357 RtlZeroMemory(Header
, sizeof(POOL_HEADER
));
359 /* Save allocation size there */
360 Header
->Ulong1
= (ULONG
)NumberOfBytes
;
362 /* Make sure it's all good */
363 ASSERT((NumberOfBytes
<= PAGE_SIZE
- sizeof(POOL_HEADER
)) &&
364 (PAGE_SIZE
<= 32 * 1024));
366 /* Mark it as paged or nonpaged */
367 if (PoolType
== PagedPool
)
369 /* Add pagedpool flag into the pool header too */
370 Header
->Ulong1
|= SPECIAL_POOL_PAGED
;
372 /* Also mark the next PTE as special-pool-paged */
373 PointerPte
[1].u
.Soft
.PageFileHigh
|= SPECIAL_POOL_PAGED_PTE
;
375 /* Increase pagable counter */
376 PagesInUse
= InterlockedIncrementUL(&MiSpecialPagesPagable
);
377 if (PagesInUse
> MiSpecialPagesPagablePeak
)
378 MiSpecialPagesPagablePeak
= PagesInUse
;
382 /* Mark the next PTE as special-pool-nonpaged */
383 PointerPte
[1].u
.Soft
.PageFileHigh
|= SPECIAL_POOL_NONPAGED_PTE
;
385 /* Increase nonpaged counter */
386 PagesInUse
= InterlockedIncrementUL(&MiSpecialPagesNonPaged
);
387 if (PagesInUse
> MiSpecialPagesNonPagedPeak
)
388 MiSpecialPagesNonPagedPeak
= PagesInUse
;
391 /* Finally save tag and put allocation time into the header's blocksize.
392 That time will be used to check memory consistency within the allocated
394 Header
->PoolTag
= Tag
;
395 Header
->BlockSize
= (UCHAR
)TickCount
.LowPart
;
396 DPRINT("%p\n", Entry
);
402 MiSpecialPoolCheckPattern(PUCHAR P
, PPOOL_HEADER Header
)
404 ULONG BytesToCheck
, BytesRequested
, Index
;
407 /* Get amount of bytes user requested to be allocated by clearing out the paged mask */
408 BytesRequested
= (Header
->Ulong1
& ~SPECIAL_POOL_PAGED
) & 0xFFFF;
409 ASSERT(BytesRequested
<= PAGE_SIZE
- sizeof(POOL_HEADER
));
411 /* Get a pointer to the end of user's area */
412 Ptr
= P
+ BytesRequested
;
414 /* Calculate how many bytes to check */
415 BytesToCheck
= (ULONG
)((PUCHAR
)PAGE_ALIGN(P
) + PAGE_SIZE
- Ptr
);
417 /* Remove pool header size if we're catching underruns */
418 if (((ULONG_PTR
)P
& (PAGE_SIZE
- 1)) == 0)
420 /* User buffer is located in the beginning of the page */
421 BytesToCheck
-= sizeof(POOL_HEADER
);
424 /* Check the pattern after user buffer */
425 for (Index
= 0; Index
< BytesToCheck
; Index
++)
427 /* Bugcheck if bytes don't match */
428 if (Ptr
[Index
] != Header
->BlockSize
)
430 KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION
,
432 (ULONG_PTR
)&Ptr
[Index
],
441 MmFreeSpecialPool(PVOID P
)
445 BOOLEAN Overruns
= FALSE
;
446 KIRQL Irql
= KeGetCurrentIrql();
448 ULONG BytesRequested
, BytesReal
= 0;
451 PMI_FREED_SPECIAL_POOL FreedHeader
;
452 LARGE_INTEGER TickCount
;
455 DPRINT("MmFreeSpecialPool(%p)\n", P
);
458 PointerPte
= MiAddressToPte(P
);
460 /* Check if it's valid */
461 if (PointerPte
->u
.Hard
.Valid
== 0)
463 /* Bugcheck if it has NOACCESS or 0 set as protection */
464 if (PointerPte
->u
.Soft
.Protection
== MM_NOACCESS
||
465 !PointerPte
->u
.Soft
.Protection
)
467 KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION
,
469 (ULONG_PTR
)PointerPte
,
475 /* Determine if it's a underruns or overruns pool pointer */
476 PtrOffset
= (ULONG
)((ULONG_PTR
)P
& (PAGE_SIZE
- 1));
479 /* Pool catches overruns */
480 Header
= PAGE_ALIGN(P
);
485 /* Pool catches underruns */
486 Header
= (PPOOL_HEADER
)((PUCHAR
)PAGE_ALIGN(P
) + PAGE_SIZE
- sizeof(POOL_HEADER
));
489 /* Check if it's non paged pool */
490 if ((Header
->Ulong1
& SPECIAL_POOL_PAGED
) == 0)
492 /* Non-paged allocation, ensure that IRQ is not higher that DISPATCH */
493 PoolType
= NonPagedPool
;
494 ASSERT(PointerPte
[1].u
.Soft
.PageFileHigh
== SPECIAL_POOL_NONPAGED_PTE
);
495 if (Irql
> DISPATCH_LEVEL
)
497 KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION
,
506 /* Paged allocation, ensure */
507 PoolType
= PagedPool
;
508 ASSERT(PointerPte
[1].u
.Soft
.PageFileHigh
== SPECIAL_POOL_PAGED_PTE
);
509 if (Irql
> APC_LEVEL
)
511 KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION
,
519 /* Get amount of bytes user requested to be allocated by clearing out the paged mask */
520 BytesRequested
= (Header
->Ulong1
& ~SPECIAL_POOL_PAGED
) & 0xFFFF;
521 ASSERT(BytesRequested
<= PAGE_SIZE
- sizeof(POOL_HEADER
));
523 /* Check memory before the allocated user buffer in case of overruns detection */
526 /* Calculate the real placement of the buffer */
527 BytesReal
= PAGE_SIZE
- PtrOffset
;
529 /* If they mismatch, it's unrecoverable */
530 if (BytesRequested
> BytesReal
)
532 KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION
,
539 if (BytesRequested
+ sizeof(POOL_HEADER
) < BytesReal
)
541 KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION
,
548 /* Actually check the memory pattern */
549 for (b
= (PUCHAR
)(Header
+ 1); b
< (PUCHAR
)P
; b
++)
551 if (*b
!= Header
->BlockSize
)
554 KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION
,
563 /* Check the memory pattern after the user buffer */
564 MiSpecialPoolCheckPattern(P
, Header
);
566 /* Fill the freed header */
567 KeQueryTickCount(&TickCount
);
568 FreedHeader
= (PMI_FREED_SPECIAL_POOL
)PAGE_ALIGN(P
);
569 FreedHeader
->Signature
= 0x98764321;
570 FreedHeader
->TickCount
= TickCount
.LowPart
;
571 FreedHeader
->NumberOfBytesRequested
= BytesRequested
;
572 FreedHeader
->Pagable
= PoolType
;
573 FreedHeader
->VirtualAddress
= P
;
574 FreedHeader
->Thread
= PsGetCurrentThread();
575 /* TODO: Fill StackPointer and StackBytes */
576 FreedHeader
->StackPointer
= NULL
;
577 FreedHeader
->StackBytes
= 0;
579 if (PoolType
== NonPagedPool
)
581 /* Non pagable. Get PFN element corresponding to the PTE */
582 Pfn
= MI_PFN_ELEMENT(PointerPte
->u
.Hard
.PageFrameNumber
);
584 /* Count the page as free */
585 InterlockedDecrementUL(&MiSpecialPagesNonPaged
);
587 /* Lock PFN database */
588 Irql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
590 /* Delete this PFN */
591 MI_SET_PFN_DELETED(Pfn
);
593 /* Decrement share count of this PFN */
594 MiDecrementShareCount(Pfn
, PointerPte
->u
.Hard
.PageFrameNumber
);
596 MI_ERASE_PTE(PointerPte
);
599 //FIXME: Use KeFlushSingleTb() instead
600 KeFlushEntireTb(TRUE
, TRUE
);
604 /* Pagable. Delete that virtual address */
605 MiDeleteSystemPageableVm(PointerPte
, 1, 0, NULL
);
607 /* Count the page as free */
608 InterlockedDecrementUL(&MiSpecialPagesPagable
);
610 /* Lock PFN database */
611 Irql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
614 /* Mark next PTE as invalid */
615 MI_ERASE_PTE(PointerPte
+ 1);
617 /* Make sure that the last entry is really the last one */
618 ASSERT(MiSpecialPoolLastPte
->u
.List
.NextEntry
== MM_EMPTY_PTE_LIST
);
620 /* Update the current last PTE next pointer */
621 MiSpecialPoolLastPte
->u
.List
.NextEntry
= PointerPte
- MmSystemPteBase
;
623 /* PointerPte becomes the new last PTE */
624 PointerPte
->u
.List
.NextEntry
= MM_EMPTY_PTE_LIST
;
625 MiSpecialPoolLastPte
= PointerPte
;
627 /* Release the PFN database lock */
628 KeReleaseQueuedSpinLock(LockQueuePfnLock
, Irql
);
630 /* Update page counter */
631 InterlockedDecrementUL(&MmSpecialPagesInUse
);
636 MiTestSpecialPool(VOID
)
642 POOL_TYPE PoolType
= PagedPool
;
644 // First allocate/free
645 for (i
=0; i
<100; i
++)
647 ByteSize
= (100 * (i
+1)) % (PAGE_SIZE
- sizeof(POOL_HEADER
));
648 p1
= MmAllocateSpecialPool(ByteSize
, 'TEST', PoolType
, 0);
649 DPRINT1("p1 %p size %lu\n", p1
, ByteSize
);
650 MmFreeSpecialPool(p1
);
653 // Now allocate all at once, then free at once
654 for (i
=0; i
<100; i
++)
656 ByteSize
= (100 * (i
+1)) % (PAGE_SIZE
- sizeof(POOL_HEADER
));
657 p2
[i
] = MmAllocateSpecialPool(ByteSize
, 'TEST', PoolType
, 0);
658 DPRINT1("p2[%lu] %p size %lu\n", i
, p1
, ByteSize
);
660 for (i
=0; i
<100; i
++)
662 DPRINT1("Freeing %p\n", p2
[i
]);
663 MmFreeSpecialPool(p2
[i
]);
666 // Overrun the buffer to test
668 //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 0);
669 //p3[ByteSize] = 0xF1; // This should cause an exception
671 // Underrun the buffer to test
672 //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 1);
674 //*p3 = 0xF1; // This should cause an exception