/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/special.c
 * PURPOSE:         ARM Memory Manager Special Pool implementation
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/*
   References:
   http://msdn.microsoft.com/en-us/library/ff551832(v=VS.85).aspx
*/
/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>
extern ULONG ExpPoolFlags;
extern PMMPTE MmSystemPteBase;

PMMPTE
NTAPI
MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
                           IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
                           IN ULONG Alignment);
/* GLOBALS ********************************************************************/
#define SPECIAL_POOL_PAGED_PTE    0x2000
#define SPECIAL_POOL_NONPAGED_PTE 0x4000
#define SPECIAL_POOL_PAGED        0x8000
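/* The *_PTE values tag the guard PTE that follows a live allocation (stored
   in u.Soft.PageFileHigh), while SPECIAL_POOL_PAGED is OR'ed into the pool
   header's Ulong1 to record that the allocation came from paged pool. */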
PVOID MmSpecialPoolStart;
PVOID MmSpecialPoolEnd;
PVOID MiSpecialPoolExtra;
ULONG MiSpecialPoolExtraCount;

PMMPTE MiSpecialPoolFirstPte;
PMMPTE MiSpecialPoolLastPte;

PFN_NUMBER MiSpecialPagesNonPagedMaximum;

BOOLEAN MmSpecialPoolCatchOverruns = TRUE;
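/* A freed special pool page is overlaid with this structure, preserving the
   allocation's vital data (requested size, owning thread, pool type) for
   debugging after the free. */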
typedef struct _MI_FREED_SPECIAL_POOL
{
    POOL_HEADER OverlaidPoolHeader;
    /* TODO: Add overlaid verifier pool header */
    ULONG Signature;
    ULONG TickCount;
    ULONG NumberOfBytesRequested;
    ULONG Pagable;
    PVOID VirtualAddress;
    PVOID StackPointer;
    ULONG StackBytes;
    PETHREAD Thread;
    UCHAR StackData[0x400];
} MI_FREED_SPECIAL_POOL, *PMI_FREED_SPECIAL_POOL;
/* PRIVATE FUNCTIONS **********************************************************/
VOID NTAPI MiTestSpecialPool(VOID);
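/* Decides whether an allocation should be routed to the special pool: it has
   to fit into a single page together with its POOL_HEADER, and its tag has to
   match the one configured in MmSpecialPoolTag. */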
BOOLEAN
NTAPI
MmUseSpecialPool(SIZE_T NumberOfBytes, ULONG Tag)
{
    /* Special pool is not suitable for allocations bigger than 1 page */
    if (NumberOfBytes > (PAGE_SIZE - sizeof(POOL_HEADER)))
    {
        return FALSE;
    }

    return Tag == MmSpecialPoolTag;
}
BOOLEAN
NTAPI
MmIsSpecialPoolAddress(PVOID P)
{
    return ((P >= MmSpecialPoolStart) &&
            (P <= MmSpecialPoolEnd));
}
BOOLEAN
NTAPI
MmIsSpecialPoolAddressFree(PVOID P)
{
    PMMPTE PointerPte;

    ASSERT(MmIsSpecialPoolAddress(P));
    PointerPte = MiAddressToPte(P);

    if (PointerPte->u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE ||
        PointerPte->u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE)
    {
        /* Guard page PTE, so the address is in use */
        return FALSE;
    }

    /* Free PTE */
    return TRUE;
}
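/* Sets up the special pool: reserves a contiguous range of system PTEs and
   chains them into a free list of PTE pairs. The first PTE of a pair maps
   the allocation's data page, the second one stays invalid and serves as a
   guard page. The NextEntry links are stored as PTE indices relative to
   MmSystemPteBase. */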
VOID
NTAPI
MiInitializeSpecialPool(VOID)
{
    ULONG SpecialPoolPtes, i;
    PMMPTE PointerPte;

    /* Check if there is a special pool tag */
    if ((MmSpecialPoolTag == 0) ||
        (MmSpecialPoolTag == -1)) return;

    /* Calculate number of system PTEs for the special pool */
    if (MmNumberOfSystemPtes >= 0x3000)
        SpecialPoolPtes = MmNumberOfSystemPtes / 3;
    else
        SpecialPoolPtes = MmNumberOfSystemPtes / 6;

    /* Don't let the number go too high */
    if (SpecialPoolPtes > 0x6000) SpecialPoolPtes = 0x6000;

    /* Round up to the page size */
    SpecialPoolPtes = PAGE_ROUND_UP(SpecialPoolPtes);

    ASSERT((SpecialPoolPtes & (PTE_PER_PAGE - 1)) == 0);
    /* Reserve those PTEs */
    do
    {
        PointerPte = MiReserveAlignedSystemPtes(SpecialPoolPtes, 0, /*0x400000*/0); // FIXME:
        if (PointerPte) break;

        /* Reserving didn't work, so try to reduce the requested size */
        ASSERT(SpecialPoolPtes >= PTE_PER_PAGE);
        SpecialPoolPtes -= PTE_PER_PAGE;
    } while (SpecialPoolPtes);

    /* Fail if we couldn't reserve them at all */
    if (!SpecialPoolPtes) return;
    /* Make sure we got enough */
    ASSERT(SpecialPoolPtes >= PTE_PER_PAGE);

    /* Save first PTE and its address */
    MiSpecialPoolFirstPte = PointerPte;
    MmSpecialPoolStart = MiPteToAddress(PointerPte);

    for (i = 0; i < PTE_PER_PAGE / 2; i++)
    {
        /* Point it to the next entry */
        PointerPte->u.List.NextEntry = &PointerPte[2] - MmSystemPteBase;

        /* Move to the next pair */
        PointerPte += 2;
    }
    /* Save extra values */
    MiSpecialPoolExtra = PointerPte;
    MiSpecialPoolExtraCount = SpecialPoolPtes - PTE_PER_PAGE;

    /* Mark the previous PTE as the last one */
    MiSpecialPoolLastPte = PointerPte - 2;
    MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    /* Save end address of the special pool */
    MmSpecialPoolEnd = MiPteToAddress(MiSpecialPoolLastPte + 1);

    /* Calculate maximum non-paged part of the special pool */
    MiSpecialPagesNonPagedMaximum = MmResidentAvailablePages >> 4;

    /* And allow a bigger share on machines with plenty of physical pages */
    if (MmNumberOfPhysicalPages > 0x3FFF)
        MiSpecialPagesNonPagedMaximum = MmResidentAvailablePages >> 3;

    DPRINT1("Special pool start %p - end %p\n", MmSpecialPoolStart, MmSpecialPoolEnd);
    ExpPoolFlags |= POOL_FLAG_SPECIAL_POOL;

    //MiTestSpecialPool();
}
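/* Links one more page's worth of PTEs from the extra reserve into the free
   list. This runs with the PFN database lock held by the caller, hence the
   DISPATCH_LEVEL assertion. */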
NTSTATUS
NTAPI
MmExpandSpecialPool(VOID)
{
    ULONG i;
    PMMPTE PointerPte;

    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    if (MiSpecialPoolExtraCount == 0)
        return STATUS_INSUFFICIENT_RESOURCES;

    PointerPte = MiSpecialPoolExtra;
    ASSERT(MiSpecialPoolFirstPte == MiSpecialPoolLastPte);
    ASSERT(MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);
    MiSpecialPoolFirstPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    ASSERT(MiSpecialPoolExtraCount >= PTE_PER_PAGE);
    for (i = 0; i < PTE_PER_PAGE / 2; i++)
    {
        /* Point it to the next entry */
        PointerPte->u.List.NextEntry = &PointerPte[2] - MmSystemPteBase;

        /* Move to the next pair */
        PointerPte += 2;
    }

    /* Save remaining extra values */
    MiSpecialPoolExtra = PointerPte;
    MiSpecialPoolExtraCount -= PTE_PER_PAGE;

    /* Mark the previous PTE as the last one */
    MiSpecialPoolLastPte = PointerPte - 2;
    MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    /* Save new end address of the special pool */
    MmSpecialPoolEnd = MiPteToAddress(MiSpecialPoolLastPte + 1);

    return STATUS_SUCCESS;
}
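/* Allocates a single special pool entry: takes a PTE pair off the free list
   and maps a fresh physical page with its first PTE, leaving the second one
   invalid as a guard page. SpecialType selects the layout: underrun detection
   (buffer at the start of the page) when SpecialType is 1, or when it is
   nonzero and MmSpecialPoolCatchOverruns is FALSE; otherwise overrun
   detection (buffer pushed against the end of the page). */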
PVOID
NTAPI
MmAllocateSpecialPool(SIZE_T NumberOfBytes, ULONG Tag, POOL_TYPE PoolType, ULONG SpecialType)
{
    KIRQL Irql;
    MMPTE TempPte = ValidKernelPte;
    PMMPTE PointerPte;
    PFN_NUMBER PageFrameNumber;
    LARGE_INTEGER TickCount;
    PVOID Entry;
    PPOOL_HEADER Header;

    DPRINT("MmAllocateSpecialPool(%x %x %x %x)\n", NumberOfBytes, Tag, PoolType, SpecialType);

    /* Check if the pool is initialized and quit if it's not */
    if (!MiSpecialPoolFirstPte) return NULL;

    /* Get the pool type */
    PoolType &= BASE_POOL_TYPE_MASK;

    /* Check whether current IRQL matches the pool type */
    Irql = KeGetCurrentIrql();

    if (((PoolType == PagedPool) && (Irql > APC_LEVEL)) ||
        ((PoolType != PagedPool) && (Irql > DISPATCH_LEVEL)))
    {
        /* Caller is at the wrong IRQL for this pool type */
        KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                     Irql,
                     PoolType,
                     NumberOfBytes,
                     0x30);
    }
    /* TODO: Take into account various limitations */
    /*if ((PoolType != NonPagedPool) &&
        MiSpecialPagesNonPaged > MiSpecialPagesNonPagedMaximum)*/

    /* Lock PFN database */
    Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Reject the allocation if the amount of available pages is too small */
    if (MmAvailablePages < 0x100)
    {
        /* Release the PFN database lock */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
        DPRINT1("Special pool: MmAvailablePages 0x%x is too small\n", MmAvailablePages);
        return NULL;
    }

    /* Check if the special pool PTE list is exhausted */
    if (MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST)
    {
        /* Try to expand it */
        if (!NT_SUCCESS(MmExpandSpecialPool()))
        {
            /* No reserves left, reject this allocation */
            static int once;
            KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
            if (!once++) DPRINT1("Special pool: No PTEs left!\n");
            return NULL;
        }
        ASSERT(MiSpecialPoolFirstPte->u.List.NextEntry != MM_EMPTY_PTE_LIST);
    }
    /* Save allocation time */
    KeQueryTickCount(&TickCount);

    /* Get a pointer to the first PTE */
    PointerPte = MiSpecialPoolFirstPte;

    /* Set the first PTE pointer to the next one in the list */
    MiSpecialPoolFirstPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

    /* Allocate a physical page */
    PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

    /* Initialize PFN and make it valid */
    TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
    MiInitializePfnAndMakePteValid(PageFrameNumber, PointerPte, TempPte);

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);

    /* Fill the page with a known pattern; the low byte of the tick count will do */
    Entry = MiPteToAddress(PointerPte);
    RtlFillMemory(Entry, PAGE_SIZE, TickCount.LowPart);
    /* Calculate header and entry addresses */
    if ((SpecialType != 0) &&
        ((SpecialType == 1) || (!MmSpecialPoolCatchOverruns)))
    {
        /* We catch underruns. Data is at the beginning of the page */
        Header = (PPOOL_HEADER)((PUCHAR)Entry + PAGE_SIZE - sizeof(POOL_HEADER));
    }
    else
    {
        /* We catch overruns. Data is at the end of the page */
        Header = (PPOOL_HEADER)Entry;
        Entry = (PVOID)((ULONG_PTR)((PUCHAR)Entry - NumberOfBytes + PAGE_SIZE) & ~((LONG_PTR)sizeof(POOL_HEADER) - 1));
    }
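    /* Note: the round-down of Entry above relies on sizeof(POOL_HEADER) being
       a power of two, so the user buffer ends as close to the guard page as
       pool-header alignment allows. */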
    /* Initialize the header */
    RtlZeroMemory(Header, sizeof(POOL_HEADER));

    /* Save allocation size there */
    Header->Ulong1 = (ULONG)NumberOfBytes;

    /* Make sure it's all good */
    ASSERT((NumberOfBytes <= PAGE_SIZE - sizeof(POOL_HEADER)) &&
           (PAGE_SIZE <= 32 * 1024));
    /* Mark it as paged or nonpaged */
    if (PoolType == PagedPool)
    {
        /* Add the paged pool flag into the pool header too */
        Header->Ulong1 |= SPECIAL_POOL_PAGED;

        /* Also mark the next PTE as special-pool-paged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_PAGED_PTE;
    }
    else
    {
        /* Mark the next PTE as special-pool-nonpaged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_NONPAGED_PTE;
    }
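    /* The PTE marked above is the pair's guard page: it is left invalid, so
       the first access past the end of the mapped page faults right away. */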
    /* Finally save the tag, and put the allocation time into the header's
       BlockSize field. That byte is used later to verify memory consistency
       within the allocated page */
    Header->PoolTag = Tag;
    Header->BlockSize = (UCHAR)TickCount.LowPart;
    DPRINT("%p\n", Entry);
    return Entry;
}
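/* Verifies the fill pattern between the end of the user buffer and the end
   of the page. Every slack byte must still equal the tick-count byte stored
   in Header->BlockSize at allocation time; any mismatch means a buffer
   overrun and triggers a bugcheck. */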
VOID
NTAPI
MiSpecialPoolCheckPattern(PUCHAR P, PPOOL_HEADER Header)
{
    ULONG BytesToCheck, BytesRequested, Index;
    PUCHAR Ptr;

    /* Get the number of bytes the user requested by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;
    ASSERT(BytesRequested <= PAGE_SIZE - sizeof(POOL_HEADER));

    /* Get a pointer to the end of the user's area */
    Ptr = P + BytesRequested;

    /* Calculate how many bytes to check */
    BytesToCheck = (ULONG)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - Ptr);

    /* Remove the pool header size if we're catching underruns */
    if (((ULONG_PTR)P & (PAGE_SIZE - 1)) == 0)
    {
        /* The user buffer is located at the beginning of the page */
        BytesToCheck -= sizeof(POOL_HEADER);
    }

    /* Check the pattern after the user buffer */
    for (Index = 0; Index < BytesToCheck; Index++)
    {
        /* Bugcheck if the bytes don't match */
        if (Ptr[Index] != Header->BlockSize)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         (ULONG_PTR)&Ptr[Index],
                         Header->BlockSize,
                         0x24);
        }
    }
}
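/* Frees a special pool allocation: the page offset of P reveals which layout
   was used (page-aligned means underrun detection), the fill pattern around
   the user buffer is verified, the page is overlaid with a
   MI_FREED_SPECIAL_POOL record for debugging, and the PTE pair is appended
   to the tail of the free list so the virtual address is recycled as late as
   possible. */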
VOID
NTAPI
MmFreeSpecialPool(PVOID P)
{
    PMMPTE PointerPte;
    PPOOL_HEADER Header;
    BOOLEAN Overruns = FALSE;
    KIRQL Irql = KeGetCurrentIrql();
    POOL_TYPE PoolType;
    ULONG BytesRequested, BytesReal = 0;
    ULONG PtrOffset;
    PUCHAR b;
    PMI_FREED_SPECIAL_POOL FreedHeader;
    LARGE_INTEGER TickCount;
    PMMPFN Pfn;

    DPRINT("MmFreeSpecialPool(%p)\n", P);
    /* Get the PTE */
    PointerPte = MiAddressToPte(P);

    /* Check if it's valid */
    if (PointerPte->u.Hard.Valid == 0)
    {
        /* Bugcheck if it has NOACCESS or 0 set as protection */
        if (PointerPte->u.Soft.Protection == MM_NOACCESS ||
            !PointerPte->u.Soft.Protection)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         (ULONG_PTR)PointerPte,
                         0,
                         0x20);
        }
    }
    /* Determine whether it's an underrun or an overrun pool pointer */
    PtrOffset = (ULONG)((ULONG_PTR)P & (PAGE_SIZE - 1));
    if (PtrOffset)
    {
        /* Pool catches overruns */
        Header = PAGE_ALIGN(P);
        Overruns = TRUE;
    }
    else
    {
        /* Pool catches underruns */
        Header = (PPOOL_HEADER)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - sizeof(POOL_HEADER));
    }
    /* Check if it's non-paged pool */
    if ((Header->Ulong1 & SPECIAL_POOL_PAGED) == 0)
    {
        /* Non-paged allocation, ensure that IRQL is not higher than DISPATCH */
        PoolType = NonPagedPool;
        ASSERT(PointerPte[1].u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         Irql,
                         PoolType,
                         (ULONG_PTR)P,
                         0x31);
        }
    }
    else
    {
        /* Paged allocation, ensure that IRQL is not higher than APC */
        PoolType = PagedPool;
        ASSERT(PointerPte[1].u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE);
        if (Irql > APC_LEVEL)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         Irql,
                         PoolType,
                         (ULONG_PTR)P,
                         0x31);
        }
    }
    /* Get the number of bytes the user requested by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;
    ASSERT(BytesRequested <= PAGE_SIZE - sizeof(POOL_HEADER));

    /* Check the memory in front of the user buffer when detecting overruns */
    if (Overruns)
    {
        /* Calculate the real placement of the buffer */
        BytesReal = PAGE_SIZE - PtrOffset;

        /* If they mismatch, it's unrecoverable */
        if (BytesRequested > BytesReal)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         BytesRequested,
                         BytesReal,
                         0x21);
        }

        if (BytesRequested + sizeof(POOL_HEADER) < BytesReal)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         BytesRequested,
                         BytesReal,
                         0x22);
        }

        /* Actually check the memory pattern */
        for (b = (PUCHAR)(Header + 1); b < (PUCHAR)P; b++)
        {
            if (*b != Header->BlockSize)
            {
                /* Bytes mismatch */
                KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                             (ULONG_PTR)P,
                             (ULONG_PTR)b,
                             Header->BlockSize,
                             0x23);
            }
        }
    }
    /* Check the memory pattern after the user buffer */
    MiSpecialPoolCheckPattern(P, Header);

    /* Fill the freed header */
    KeQueryTickCount(&TickCount);
    FreedHeader = (PMI_FREED_SPECIAL_POOL)PAGE_ALIGN(P);
    FreedHeader->Signature = 0x98764321;
    FreedHeader->TickCount = TickCount.LowPart;
    FreedHeader->NumberOfBytesRequested = BytesRequested;
    FreedHeader->Pagable = PoolType;
    FreedHeader->VirtualAddress = P;
    FreedHeader->Thread = PsGetCurrentThread();
    /* TODO: Fill StackPointer and StackBytes */
    FreedHeader->StackPointer = NULL;
    FreedHeader->StackBytes = 0;
    if (PoolType == NonPagedPool)
    {
        /* Non-pagable. Get the PFN element corresponding to the PTE */
        Pfn = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);

        /* Lock PFN database */
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Delete this PFN */
        MI_SET_PFN_DELETED(Pfn);

        /* Decrement the share count of this PFN */
        MiDecrementShareCount(Pfn, PointerPte->u.Hard.PageFrameNumber);

        /* Erase the PTE and flush the TLB */
        MI_ERASE_PTE(PointerPte);
        //FIXME: Use KeFlushSingleTb() instead
        KeFlushEntireTb(TRUE, TRUE);
    }
    else
    {
        /* Pagable. Delete that virtual address */
        MiDeleteSystemPageableVm(PointerPte, 1, 0, NULL);

        /* Lock PFN database */
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    /* Mark the next PTE as invalid */
    MI_ERASE_PTE(PointerPte + 1);

    /* Make sure that the last entry is really the last one */
    ASSERT(MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);

    /* Update the current last PTE's next pointer */
    MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    /* PointerPte becomes the new last PTE */
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
    MiSpecialPoolLastPte = PointerPte;

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
}
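/* Smoke test for the special pool: runs allocate/free cycles of varying
   sizes, then the (commented out) overrun and underrun writes, each of which
   should fault on a guard page. */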
VOID
NTAPI
MiTestSpecialPool(VOID)
{
    ULONG i;
    PVOID p1, p2[100];
    //PUCHAR p3;
    ULONG ByteSize;
    POOL_TYPE PoolType = PagedPool;

    // First allocate/free
    for (i = 0; i < 100; i++)
    {
        ByteSize = (100 * (i + 1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p1 = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p1 %p size %lu\n", p1, ByteSize);
        MmFreeSpecialPool(p1);
    }

    // Now allocate all at once, then free all at once
    for (i = 0; i < 100; i++)
    {
        ByteSize = (100 * (i + 1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p2[i] = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p2[%lu] %p size %lu\n", i, p2[i], ByteSize);
    }
    for (i = 0; i < 100; i++)
    {
        DPRINT1("Freeing %p\n", p2[i]);
        MmFreeSpecialPool(p2[i]);
    }

    // Overrun the buffer to test
    //ByteSize = PAGE_SIZE - sizeof(POOL_HEADER);
    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 0);
    //p3[ByteSize] = 0xF1; // This should cause an exception

    // Underrun the buffer to test
    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 1);
    //p3--;
    //*p3 = 0xF1; // This should cause an exception
}