/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/special.c
 * PURPOSE:         ARM Memory Manager Special Pool implementation
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/*
 * References:
 * http://msdn.microsoft.com/en-us/library/ff551832(v=VS.85).aspx
 */
14 /* INCLUDES *******************************************************************/
20 #define MODULE_INVOLVED_IN_ARM3
21 #include "../ARM3/miarm.h"
23 extern PMMPTE MmSystemPteBase
;
27 MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes
,
28 IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
,
31 /* GLOBALS ********************************************************************/
33 #define SPECIAL_POOL_PAGED_PTE 0x2000
34 #define SPECIAL_POOL_NONPAGED_PTE 0x4000
35 #define SPECIAL_POOL_PAGED 0x8000
37 PVOID MmSpecialPoolStart
;
38 PVOID MmSpecialPoolEnd
;
39 PVOID MiSpecialPoolExtra
;
40 ULONG MiSpecialPoolExtraCount
;
42 PMMPTE MiSpecialPoolFirstPte
;
43 PMMPTE MiSpecialPoolLastPte
;
45 PFN_NUMBER MiSpecialPagesNonPagedMaximum
;
47 BOOLEAN MmSpecialPoolCatchOverruns
= TRUE
;
49 typedef struct _MI_FREED_SPECIAL_POOL
51 POOL_HEADER OverlaidPoolHeader
;
52 /* TODO: Add overlaid verifier pool header */
55 ULONG NumberOfBytesRequested
;
61 UCHAR StackData
[0x400];
62 } MI_FREED_SPECIAL_POOL
, *PMI_FREED_SPECIAL_POOL
;
64 /* PRIVATE FUNCTIONS **********************************************************/
66 VOID NTAPI
MiTestSpecialPool();
70 MmUseSpecialPool(SIZE_T NumberOfBytes
, ULONG Tag
)
72 /* Special pool is not suitable for allocations bigger than 1 page */
73 if (NumberOfBytes
> (PAGE_SIZE
- sizeof(POOL_HEADER
)))
83 MmIsSpecialPoolAddress(PVOID P
)
85 return((P
>= MmSpecialPoolStart
) &&
86 (P
<= MmSpecialPoolEnd
));
91 MiInitializeSpecialPool()
93 ULONG SpecialPoolPtes
, i
;
96 /* Check if there is a special pool tag */
97 if ((MmSpecialPoolTag
== 0) ||
98 (MmSpecialPoolTag
== -1)) return;
100 /* Calculate number of system PTEs for the special pool */
101 if ( MmNumberOfSystemPtes
>= 0x3000 )
102 SpecialPoolPtes
= MmNumberOfSystemPtes
/ 3;
104 SpecialPoolPtes
= MmNumberOfSystemPtes
/ 6;
106 /* Don't let the number go too high */
107 if (SpecialPoolPtes
> 0x6000) SpecialPoolPtes
= 0x6000;
109 /* Round up to the page size */
110 SpecialPoolPtes
= PAGE_ROUND_UP(SpecialPoolPtes
);
112 ASSERT((SpecialPoolPtes
& (PTE_PER_PAGE
- 1)) == 0);
114 /* Reserve those PTEs */
117 PointerPte
= MiReserveAlignedSystemPtes(SpecialPoolPtes
, 0, /*0x400000*/0); // FIXME:
118 if (PointerPte
) break;
120 /* Reserving didn't work, so try to reduce the requested size */
121 ASSERT(SpecialPoolPtes
>= PTE_PER_PAGE
);
122 SpecialPoolPtes
-= 1024;
123 } while (SpecialPoolPtes
);
125 /* Fail if we couldn't reserve them at all */
126 if (!SpecialPoolPtes
) return;
128 /* Make sure we got enough */
129 ASSERT(SpecialPoolPtes
>= PTE_PER_PAGE
);
131 /* Save first PTE and its address */
132 MiSpecialPoolFirstPte
= PointerPte
;
133 MmSpecialPoolStart
= MiPteToAddress(PointerPte
);
135 for (i
= 0; i
<512; i
++)
137 /* Point it to the next entry */
138 PointerPte
->u
.List
.NextEntry
= &PointerPte
[2] - MmSystemPteBase
;
140 /* Move to the next pair */
144 /* Save extra values */
145 MiSpecialPoolExtra
= PointerPte
;
146 MiSpecialPoolExtraCount
= SpecialPoolPtes
- 1024;
148 /* Mark the previous PTE as the last one */
149 MiSpecialPoolLastPte
= PointerPte
- 2;
150 MiSpecialPoolLastPte
->u
.List
.NextEntry
= MM_EMPTY_PTE_LIST
;
152 /* Save end address of the special pool */
153 MmSpecialPoolEnd
= MiPteToAddress(MiSpecialPoolLastPte
+ 1);
155 /* Calculate maximum non-paged part of the special pool */
156 MiSpecialPagesNonPagedMaximum
= MmResidentAvailablePages
>> 4;
158 /* And limit it if it turned out to be too big */
159 if (MmNumberOfPhysicalPages
> 0x3FFF)
160 MiSpecialPagesNonPagedMaximum
= MmResidentAvailablePages
>> 3;
162 DPRINT1("Special pool start %p - end %p\n", MmSpecialPoolStart
, MmSpecialPoolEnd
);
164 //MiTestSpecialPool();
169 MmAllocateSpecialPool(SIZE_T NumberOfBytes
, ULONG Tag
, POOL_TYPE PoolType
, ULONG SpecialType
)
172 MMPTE TempPte
= ValidKernelPte
;
174 PFN_NUMBER PageFrameNumber
;
175 LARGE_INTEGER TickCount
;
179 DPRINT1("MmAllocateSpecialPool(%x %x %x %x)\n", NumberOfBytes
, Tag
, PoolType
, SpecialType
);
181 /* Check if the pool is initialized and quit if it's not */
182 if (!MiSpecialPoolFirstPte
) return NULL
;
184 /* Get the pool type */
185 PoolType
&= BASE_POOL_TYPE_MASK
;
187 /* Check whether current IRQL matches the pool type */
188 Irql
= KeGetCurrentIrql();
190 if (((PoolType
== PagedPool
) && (Irql
> APC_LEVEL
)) ||
191 ((PoolType
!= PagedPool
) && (Irql
> DISPATCH_LEVEL
)))
194 KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION
, Irql
, PoolType
, NumberOfBytes
, 0x30);
197 /* TODO: Take into account various limitations */
198 /*if ((PoolType != NonPagedPool) &&
199 MiSpecialPagesNonPaged > MiSpecialPagesNonPagedMaximum)*/
201 /* Lock PFN database */
202 Irql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
204 /* Reject allocation in case amount of available pages is too small */
205 if (MmAvailablePages
< 0x100)
207 /* Release the PFN database lock */
208 KeReleaseQueuedSpinLock(LockQueuePfnLock
, Irql
);
209 DPRINT1("Special pool: MmAvailablePages 0x%x is too small\n", MmAvailablePages
);
213 /* Reject allocation if special pool PTE list is exhausted */
214 if (MiSpecialPoolFirstPte
->u
.List
.NextEntry
== MM_EMPTY_PTE_LIST
)
216 /* Release the PFN database lock */
217 KeReleaseQueuedSpinLock(LockQueuePfnLock
, Irql
);
218 DPRINT1("Special pool: No PTEs left!\n");
219 /* TODO: Expand the special pool */
223 /* Save allocation time */
224 KeQueryTickCount(&TickCount
);
226 /* Get a pointer to the first PTE */
227 PointerPte
= MiSpecialPoolFirstPte
;
229 /* Set the first PTE pointer to the next one in the list */
230 MiSpecialPoolFirstPte
= MmSystemPteBase
+ PointerPte
->u
.List
.NextEntry
;
232 /* Allocate a physical page */
233 PageFrameNumber
= MiRemoveAnyPage(MI_GET_NEXT_COLOR());
235 /* Initialize PFN and make it valid */
236 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameNumber
;
237 MiInitializePfnAndMakePteValid(PageFrameNumber
, PointerPte
, TempPte
);
239 /* Release the PFN database lock */
240 KeReleaseQueuedSpinLock(LockQueuePfnLock
, Irql
);
242 /* Put some content into the page. Low value of tick count would do */
243 Entry
= MiPteToAddress(PointerPte
);
244 RtlFillMemory(Entry
, PAGE_SIZE
, TickCount
.LowPart
);
246 /* Calculate header and entry addresses */
247 if ((SpecialType
!= 0) &&
248 ((SpecialType
== 1) || (!MmSpecialPoolCatchOverruns
)))
250 /* We catch underruns. Data is at the beginning of the page */
251 Header
= (PPOOL_HEADER
)((PUCHAR
)Entry
+ PAGE_SIZE
- sizeof(POOL_HEADER
));
255 /* We catch overruns. Data is at the end of the page */
256 Header
= (PPOOL_HEADER
)Entry
;
257 Entry
= (PVOID
)((ULONG_PTR
)((PUCHAR
)Entry
- NumberOfBytes
+ PAGE_SIZE
) & ~((LONG_PTR
)sizeof(POOL_HEADER
) - 1));
260 /* Initialize the header */
261 RtlZeroMemory(Header
, sizeof(POOL_HEADER
));
263 /* Save allocation size there */
264 Header
->Ulong1
= (ULONG
)NumberOfBytes
;
266 /* Make sure it's all good */
267 ASSERT((NumberOfBytes
<= PAGE_SIZE
- sizeof(POOL_HEADER
)) &&
268 (PAGE_SIZE
<= 32 * 1024));
270 /* Mark it as paged or nonpaged */
271 if (PoolType
== PagedPool
)
273 /* Add pagedpool flag into the pool header too */
274 Header
->Ulong1
|= SPECIAL_POOL_PAGED
;
276 /* Also mark the next PTE as special-pool-paged */
277 PointerPte
[1].u
.Soft
.PageFileHigh
|= SPECIAL_POOL_PAGED_PTE
;
281 /* Mark the next PTE as special-pool-nonpaged */
282 PointerPte
[1].u
.Soft
.PageFileHigh
|= SPECIAL_POOL_NONPAGED_PTE
;
285 /* Finally save tag and put allocation time into the header's blocksize.
286 That time will be used to check memory consistency within the allocated
288 Header
->PoolTag
= Tag
;
289 Header
->BlockSize
= (USHORT
)TickCount
.LowPart
;
290 DPRINT1("%p\n", Entry
);
296 MiSpecialPoolCheckPattern(PUCHAR P
, PPOOL_HEADER Header
)
298 ULONG BytesToCheck
, BytesRequested
, Index
;
301 /* Get amount of bytes user requested to be allocated by clearing out the paged mask */
302 BytesRequested
= (Header
->Ulong1
& ~SPECIAL_POOL_PAGED
) & 0xFFFF;
304 /* Get a pointer to the end of user's area */
305 Ptr
= P
+ BytesRequested
;
307 /* Calculate how many bytes to check */
308 BytesToCheck
= (ULONG
)((PUCHAR
)PAGE_ALIGN(P
) + PAGE_SIZE
- Ptr
);
310 /* Remove pool header size if we're catching underruns */
311 if (((ULONG_PTR
)P
& (PAGE_SIZE
- 1)) == 0)
313 /* User buffer is located in the beginning of the page */
314 BytesToCheck
-= sizeof(POOL_HEADER
);
317 /* Check the pattern after user buffer */
318 for (Index
= 0; Index
< BytesToCheck
; Index
++)
320 /* Bugcheck if bytes don't match */
321 if (Ptr
[Index
] != Header
->BlockSize
)
323 KeBugCheckEx(BAD_POOL_HEADER
, (ULONG_PTR
)P
, (ULONG_PTR
)&Ptr
[Index
], Header
->BlockSize
, 0x24);
330 MmFreeSpecialPool(PVOID P
)
334 BOOLEAN Overruns
= FALSE
;
335 KIRQL Irql
= KeGetCurrentIrql();
337 ULONG BytesRequested
, BytesReal
= 0;
340 PMI_FREED_SPECIAL_POOL FreedHeader
;
341 LARGE_INTEGER TickCount
;
344 DPRINT1("MmFreeSpecialPool(%p)\n", P
);
347 PointerPte
= MiAddressToPte(P
);
349 /* Check if it's valid */
350 if (PointerPte
->u
.Hard
.Valid
== 0)
352 /* Bugcheck if it has NOACCESS or 0 set as protection */
353 if (PointerPte
->u
.Soft
.Protection
== MM_NOACCESS
||
354 !PointerPte
->u
.Soft
.Protection
)
356 KeBugCheckEx(BAD_POOL_HEADER
, (ULONG_PTR
)P
, (ULONG_PTR
)PointerPte
, 0, 0x20);
360 /* Determine if it's a underruns or overruns pool pointer */
361 PtrOffset
= (ULONG
)((ULONG_PTR
)P
& (PAGE_SIZE
- 1));
364 /* Pool catches overruns */
365 Header
= PAGE_ALIGN(P
);
370 /* Pool catches underruns */
371 Header
= (PPOOL_HEADER
)((PUCHAR
)PAGE_ALIGN(P
) + PAGE_SIZE
- sizeof(POOL_HEADER
));
374 /* Check if it's non paged pool */
375 if ((Header
->Ulong1
& SPECIAL_POOL_PAGED
) == 0)
377 /* Non-paged allocation, ensure that IRQ is not higher that DISPATCH */
378 ASSERT((PointerPte
+ 1)->u
.Soft
.PageFileHigh
== SPECIAL_POOL_NONPAGED_PTE
);
379 if (Irql
> DISPATCH_LEVEL
)
381 KeBugCheckEx(BAD_POOL_HEADER
, Irql
, (ULONG_PTR
)P
, 0, 0x31);
384 PoolType
= NonPagedPool
;
388 /* Paged allocation, ensure */
389 ASSERT((PointerPte
+ 1)->u
.Soft
.PageFileHigh
== SPECIAL_POOL_PAGED_PTE
);
390 if (Irql
> DISPATCH_LEVEL
)
392 KeBugCheckEx(BAD_POOL_HEADER
, Irql
, (ULONG_PTR
)P
, 1, 0x31);
395 PoolType
= PagedPool
;
398 /* Get amount of bytes user requested to be allocated by clearing out the paged mask */
399 BytesRequested
= (Header
->Ulong1
& ~SPECIAL_POOL_PAGED
) & 0xFFFF;
401 /* Check memory before the allocated user buffer in case of overruns detection */
404 /* Calculate the real placement of the buffer */
405 BytesReal
= PAGE_SIZE
- PtrOffset
;
407 /* If they mismatch, it's unrecoverable */
408 if (BytesRequested
> BytesReal
)
410 KeBugCheckEx(BAD_POOL_HEADER
, (ULONG_PTR
)P
, BytesRequested
, BytesReal
, 0x21);
413 if (BytesRequested
+ sizeof(POOL_HEADER
) < BytesReal
)
415 KeBugCheckEx(BAD_POOL_HEADER
, (ULONG_PTR
)P
, BytesRequested
, BytesReal
, 0x22);
418 /* Actually check the memory pattern */
419 for (b
= (PUCHAR
)(Header
+ 1); b
< (PUCHAR
)P
; b
++)
421 if (Header
->BlockSize
!= b
[0])
424 KeBugCheckEx(BAD_POOL_HEADER
, (ULONG_PTR
)P
, (ULONG_PTR
)b
, Header
->BlockSize
, 0x23);
429 /* Check the memory pattern after the user buffer */
430 MiSpecialPoolCheckPattern(P
, Header
);
432 /* Fill the freed header */
433 KeQueryTickCount(&TickCount
);
434 FreedHeader
= (PMI_FREED_SPECIAL_POOL
)PAGE_ALIGN(P
);
435 FreedHeader
->Signature
= 0x98764321;
436 FreedHeader
->TickCount
= TickCount
.LowPart
;
437 FreedHeader
->NumberOfBytesRequested
= BytesRequested
;
438 FreedHeader
->Pagable
= PoolType
;
439 FreedHeader
->VirtualAddress
= P
;
440 FreedHeader
->Thread
= PsGetCurrentThread();
441 /* TODO: Fill StackPointer and StackBytes */
442 FreedHeader
->StackPointer
= NULL
;
443 FreedHeader
->StackBytes
= 0;
445 if (PoolType
== NonPagedPool
)
447 /* Non pagable. Get PFN element corresponding to the PTE */
448 Pfn
= MI_PFN_ELEMENT(PointerPte
->u
.Hard
.PageFrameNumber
);
450 /* Lock PFN database */
451 ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL
);
452 Irql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
454 /* Delete this PFN */
455 MI_SET_PFN_DELETED(Pfn
);
457 /* Decrement share count of this PFN */
458 MiDecrementShareCount(Pfn
, PointerPte
->u
.Hard
.PageFrameNumber
);
461 //FIXME: Use KeFlushSingleTb() instead
462 KeFlushEntireTb(TRUE
, TRUE
);
466 /* Pagable. Delete that virtual address */
467 MiDeleteSystemPageableVm(PointerPte
, 1, 0, NULL
);
469 /* Lock PFN database */
470 ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL
);
471 Irql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
474 /* Mark next PTE as invalid */
475 PointerPte
[1].u
.Long
= 0; //|= 8000;
477 /* Make sure that the last entry is really the last one */
478 ASSERT(MiSpecialPoolLastPte
->u
.List
.NextEntry
== MM_EMPTY_PTE_LIST
);
480 /* Update the current last PTE next pointer */
481 MiSpecialPoolLastPte
->u
.List
.NextEntry
= PointerPte
- MmSystemPteBase
;
483 /* PointerPte becomes the new last PTE */
484 PointerPte
->u
.List
.NextEntry
= MM_EMPTY_PTE_LIST
;
485 MiSpecialPoolLastPte
= PointerPte
;
487 /* Release the PFN database lock */
488 KeReleaseQueuedSpinLock(LockQueuePfnLock
, Irql
);
499 POOL_TYPE PoolType
= PagedPool
;
501 // First allocate/free
502 for (i
=0; i
<100; i
++)
504 ByteSize
= (100 * (i
+1)) % (PAGE_SIZE
- sizeof(POOL_HEADER
));
505 p1
= MmAllocateSpecialPool(ByteSize
, 'TEST', PoolType
, 0);
506 DPRINT1("p1 %p size %lu\n", p1
, ByteSize
);
507 MmFreeSpecialPool(p1
);
510 // Now allocate all at once, then free at once
511 for (i
=0; i
<100; i
++)
513 ByteSize
= (100 * (i
+1)) % (PAGE_SIZE
- sizeof(POOL_HEADER
));
514 p2
[i
] = MmAllocateSpecialPool(ByteSize
, 'TEST', PoolType
, 0);
515 DPRINT1("p2[%lu] %p size %lu\n", i
, p1
, ByteSize
);
517 for (i
=0; i
<100; i
++)
519 DPRINT1("Freeing %p\n", p2
[i
]);
520 MmFreeSpecialPool(p2
[i
]);
523 // Overrun the buffer to test
525 //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 0);
526 //p3[ByteSize] = 0xF1; // This should cause an exception
528 // Underrun the buffer to test
529 //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 1);
531 //*p3 = 0xF1; // This should cause an exception