/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/special.c
 * PURPOSE:         ARM Memory Manager Special Pool implementation
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/*
    References:
    http://msdn.microsoft.com/en-us/library/ff551832(v=VS.85).aspx
*/

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

extern PMMPTE MmSystemPteBase;

PMMPTE
NTAPI
MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
                           IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
                           IN ULONG Alignment);

/* GLOBALS ********************************************************************/

#define SPECIAL_POOL_PAGED_PTE    0x2000
#define SPECIAL_POOL_NONPAGED_PTE 0x4000
#define SPECIAL_POOL_PAGED        0x8000

PVOID MmSpecialPoolStart;
PVOID MmSpecialPoolEnd;
PVOID MiSpecialPoolExtra;
ULONG MiSpecialPoolExtraCount;

PMMPTE MiSpecialPoolFirstPte;
PMMPTE MiSpecialPoolLastPte;

PFN_NUMBER MiSpecialPagesNonPagedMaximum;

BOOLEAN MmSpecialPoolCatchOverruns = TRUE;

typedef struct _MI_FREED_SPECIAL_POOL
{
    POOL_HEADER OverlaidPoolHeader;
    /* TODO: Add overlaid verifier pool header */
    ULONG Signature;
    ULONG TickCount;
    ULONG NumberOfBytesRequested;
    BOOLEAN Pagable;
    PVOID VirtualAddress;
    PVOID StackPointer;
    ULONG StackBytes;
    PETHREAD Thread;
    UCHAR StackData[0x400];
} MI_FREED_SPECIAL_POOL, *PMI_FREED_SPECIAL_POOL;
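/*
 * Note: MmFreeSpecialPool (below) overlays this record on the start of a
 * page as the page is being freed, stamping it with the 0x98764321
 * signature, the tick count, the requested size and the freeing thread.
 * Debugging code that looks at the page contents before the physical page
 * is reused can therefore still tell who freed the block, when, and how
 * many bytes had been requested.
 */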

/* PRIVATE FUNCTIONS **********************************************************/

VOID NTAPI MiTestSpecialPool(VOID);

BOOLEAN
NTAPI
MmUseSpecialPool(SIZE_T NumberOfBytes, ULONG Tag)
{
    /* Special pool is not suitable for allocations bigger than 1 page */
    if (NumberOfBytes > (PAGE_SIZE - sizeof(POOL_HEADER)))
        return FALSE;

    // FIXME
    //return TRUE;
    return FALSE;
}
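
/*
 * A minimal caller sketch (disabled, illustrative only; this is not the
 * actual expool.c code path): an allocator front-end could consult
 * MmUseSpecialPool() first and fall back to the regular pool when the
 * request does not qualify. ExampleAllocate is a hypothetical helper, not
 * a kernel export.
 */
#if 0
PVOID
ExampleAllocate(SIZE_T NumberOfBytes, ULONG Tag, POOL_TYPE PoolType)
{
    if (MmUseSpecialPool(NumberOfBytes, Tag))
    {
        /* Try the special pool first; it may run out of PTEs or pages */
        PVOID Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 0);
        if (Entry) return Entry;
    }

    /* Not eligible (or special pool exhausted): use the regular pool */
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
#endif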

BOOLEAN
NTAPI
MmIsSpecialPoolAddress(PVOID P)
{
    return ((P >= MmSpecialPoolStart) &&
            (P <= MmSpecialPoolEnd));
}
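
/*
 * The matching free-side sketch (again hypothetical and disabled, not the
 * expool.c implementation): a free routine can use the inclusive range
 * check above to decide whether a pointer has to go back through
 * MmFreeSpecialPool() instead of the regular pool free path.
 */
#if 0
VOID
ExampleFree(PVOID P)
{
    if (MmIsSpecialPoolAddress(P))
    {
        /* Special pool allocations must be freed by the special pool */
        MmFreeSpecialPool(P);
        return;
    }
    ExFreePool(P);
}
#endif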

VOID
NTAPI
MiInitializeSpecialPool(VOID)
{
    ULONG SpecialPoolPtes, i;
    PMMPTE PointerPte;

    /* Check if there is a special pool tag */
    if ((MmSpecialPoolTag == 0) ||
        (MmSpecialPoolTag == -1)) return;

    /* Calculate the number of system PTEs for the special pool */
    if (MmNumberOfSystemPtes >= 0x3000)
        SpecialPoolPtes = MmNumberOfSystemPtes / 3;
    else
        SpecialPoolPtes = MmNumberOfSystemPtes / 6;

    /* Don't let the number go too high */
    if (SpecialPoolPtes > 0x6000) SpecialPoolPtes = 0x6000;

    /* Round up to the page size */
    SpecialPoolPtes = PAGE_ROUND_UP(SpecialPoolPtes);

    ASSERT((SpecialPoolPtes & (PTE_PER_PAGE - 1)) == 0);

    /* Reserve those PTEs */
    do
    {
        PointerPte = MiReserveAlignedSystemPtes(SpecialPoolPtes, 0, /*0x400000*/0); // FIXME:
        if (PointerPte) break;

        /* Reserving didn't work, so try to reduce the requested size */
        ASSERT(SpecialPoolPtes >= PTE_PER_PAGE);
        SpecialPoolPtes -= 1024;
    } while (SpecialPoolPtes);

    /* Fail if we couldn't reserve them at all */
    if (!SpecialPoolPtes) return;

    /* Make sure we got enough */
    ASSERT(SpecialPoolPtes >= PTE_PER_PAGE);

    /* Save the first PTE and its address */
    MiSpecialPoolFirstPte = PointerPte;
    MmSpecialPoolStart = MiPteToAddress(PointerPte);

    for (i = 0; i < 512; i++)
    {
        /* Point it to the next entry */
        PointerPte->u.List.NextEntry = &PointerPte[2] - MmSystemPteBase;

        /* Move to the next pair */
        PointerPte += 2;
    }

    /* Save the extra values */
    MiSpecialPoolExtra = PointerPte;
    MiSpecialPoolExtraCount = SpecialPoolPtes - 1024;

    /* Mark the previous PTE as the last one */
    MiSpecialPoolLastPte = PointerPte - 2;
    MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    /* Save the end address of the special pool */
    MmSpecialPoolEnd = MiPteToAddress(MiSpecialPoolLastPte + 1);

    /* Calculate the maximum nonpaged part of the special pool */
    MiSpecialPagesNonPagedMaximum = MmResidentAvailablePages >> 4;

    /* And raise that limit if the machine has enough physical pages */
    if (MmNumberOfPhysicalPages > 0x3FFF)
        MiSpecialPagesNonPagedMaximum = MmResidentAvailablePages >> 3;

    DPRINT1("Special pool start %p - end %p\n", MmSpecialPoolStart, MmSpecialPoolEnd);

    //MiTestSpecialPool();
}
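
/*
 * Resulting layout, as a sketch (assuming x86-style 4 KB pages with 1024
 * PTEs per page table): the reserved PTEs are consumed in pairs, where the
 * even PTE maps the data page and the odd PTE is deliberately left invalid
 * so that it acts as a guard page right behind it:
 *
 *     PTE[0] data --> PTE[2] data --> PTE[4] data --> ... (free list)
 *     PTE[1] guard    PTE[3] guard    PTE[5] guard
 *
 * The loop above links only the first 512 pairs (1024 PTEs); anything
 * reserved beyond that is parked in MiSpecialPoolExtra /
 * MiSpecialPoolExtraCount for the (still unimplemented) pool expansion.
 */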

PVOID
NTAPI
MmAllocateSpecialPool(SIZE_T NumberOfBytes, ULONG Tag, POOL_TYPE PoolType, ULONG SpecialType)
{
    KIRQL Irql;
    MMPTE TempPte = ValidKernelPte;
    PMMPTE PointerPte;
    PFN_NUMBER PageFrameNumber;
    LARGE_INTEGER TickCount;
    PVOID Entry;
    PPOOL_HEADER Header;

    DPRINT1("MmAllocateSpecialPool(%x %x %x %x)\n", NumberOfBytes, Tag, PoolType, SpecialType);

    /* Check if the pool is initialized and quit if it's not */
    if (!MiSpecialPoolFirstPte) return NULL;

    /* Get the base pool type */
    PoolType &= BASE_POOL_TYPE_MASK;

    /* Check whether the current IRQL matches the pool type */
    Irql = KeGetCurrentIrql();

    if (((PoolType == PagedPool) && (Irql > APC_LEVEL)) ||
        ((PoolType != PagedPool) && (Irql > DISPATCH_LEVEL)))
    {
        /* Bad caller */
        KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, Irql, PoolType, NumberOfBytes, 0x30);
    }

    /* TODO: Take into account various limitations */
    /*if ((PoolType != NonPagedPool) &&
        MiSpecialPagesNonPaged > MiSpecialPagesNonPagedMaximum)*/

    /* Lock the PFN database */
    Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Reject the allocation if the number of available pages is too small */
    if (MmAvailablePages < 0x100)
    {
        /* Release the PFN database lock */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
        DPRINT1("Special pool: MmAvailablePages 0x%x is too small\n", MmAvailablePages);
        return NULL;
    }

    /* Reject the allocation if the special pool PTE list is exhausted */
    if (MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST)
    {
        /* Release the PFN database lock */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
        DPRINT1("Special pool: No PTEs left!\n");
        /* TODO: Expand the special pool */
        return NULL;
    }

    /* Save the allocation time */
    KeQueryTickCount(&TickCount);

    /* Get a pointer to the first PTE */
    PointerPte = MiSpecialPoolFirstPte;

    /* Set the first PTE pointer to the next one in the list */
    MiSpecialPoolFirstPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

    /* Allocate a physical page */
    PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

    /* Initialize the PFN entry and make the PTE valid */
    TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
    MiInitializePfnAndMakePteValid(PageFrameNumber, PointerPte, TempPte);

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);

    /* Fill the page with a known pattern; the low byte of the tick count will do */
    Entry = MiPteToAddress(PointerPte);
    RtlFillMemory(Entry, PAGE_SIZE, TickCount.LowPart);

    /* Calculate the header and entry addresses */
    if ((SpecialType != 0) &&
        ((SpecialType == 1) || (!MmSpecialPoolCatchOverruns)))
    {
        /* We catch underruns. Data is at the beginning of the page */
        Header = (PPOOL_HEADER)((PUCHAR)Entry + PAGE_SIZE - sizeof(POOL_HEADER));
    }
    else
    {
        /* We catch overruns. Data is at the end of the page */
        Header = (PPOOL_HEADER)Entry;
        Entry = (PVOID)((ULONG_PTR)((PUCHAR)Entry - NumberOfBytes + PAGE_SIZE) & ~((LONG_PTR)sizeof(POOL_HEADER) - 1));
    }

    /* Initialize the header */
    RtlZeroMemory(Header, sizeof(POOL_HEADER));

    /* Save the allocation size there */
    Header->Ulong1 = (ULONG)NumberOfBytes;

    /* Make sure it's all good */
    ASSERT((NumberOfBytes <= PAGE_SIZE - sizeof(POOL_HEADER)) &&
           (PAGE_SIZE <= 32 * 1024));

    /* Mark it as paged or nonpaged */
    if (PoolType == PagedPool)
    {
        /* Add the pagedpool flag into the pool header too */
        Header->Ulong1 |= SPECIAL_POOL_PAGED;

        /* Also mark the next PTE as special-pool-paged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_PAGED_PTE;
    }
    else
    {
        /* Mark the next PTE as special-pool-nonpaged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_NONPAGED_PTE;
    }

    /* Finally, save the tag and put the allocation time into the header's
       BlockSize. That value will later be used to verify the fill pattern
       within the allocated page. */
    Header->PoolTag = Tag;
    Header->BlockSize = (USHORT)TickCount.LowPart;
    DPRINT1("%p\n", Entry);
    return Entry;
}
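
/*
 * A worked example of the placement arithmetic above (assuming a 4 KB page
 * and an 8-byte POOL_HEADER), for the default overrun-catching case with
 * NumberOfBytes = 29:
 *
 *     Entry  = (page + 0x1000 - 29) & ~7 = (page + 0xFE3) & ~7 = page + 0xFE0
 *     Header = page + 0x000
 *
 * The buffer occupies page + 0xFE0 .. page + 0xFFC, so the first write that
 * crosses into page + 0x1000 lands on the invalid guard PTE and faults
 * immediately. The three tail slack bytes at 0xFFD..0xFFF and everything
 * between the header and the buffer keep the tick-count fill pattern and
 * are verified when the block is freed.
 */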

VOID
NTAPI
MiSpecialPoolCheckPattern(PUCHAR P, PPOOL_HEADER Header)
{
    ULONG BytesToCheck, BytesRequested, Index;
    PUCHAR Ptr;

    /* Get the number of bytes the user requested by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;

    /* Get a pointer to the end of the user's area */
    Ptr = P + BytesRequested;

    /* Calculate how many bytes to check */
    BytesToCheck = (ULONG)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - Ptr);

    /* Remove the pool header size if we're catching underruns */
    if (((ULONG_PTR)P & (PAGE_SIZE - 1)) == 0)
    {
        /* The user buffer is located at the beginning of the page */
        BytesToCheck -= sizeof(POOL_HEADER);
    }

    /* Check the pattern after the user buffer */
    for (Index = 0; Index < BytesToCheck; Index++)
    {
        /* Bugcheck if the bytes don't match */
        if (Ptr[Index] != Header->BlockSize)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, (ULONG_PTR)&Ptr[Index], Header->BlockSize, 0x24);
        }
    }
}
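
/*
 * Continuing the 29-byte example from above: BytesRequested = 29, so
 * Ptr = page + 0xFFD and BytesToCheck = 3; each slack byte must still equal
 * the saved tick-count byte in Header->BlockSize. In the underrun-catching
 * case P is page-aligned and the header sits in the last
 * sizeof(POOL_HEADER) bytes of the page, which is why those bytes are
 * excluded from the check.
 */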

VOID
NTAPI
MmFreeSpecialPool(PVOID P)
{
    PMMPTE PointerPte;
    PPOOL_HEADER Header;
    BOOLEAN Overruns = FALSE;
    KIRQL Irql = KeGetCurrentIrql();
    POOL_TYPE PoolType;
    ULONG BytesRequested, BytesReal = 0;
    ULONG PtrOffset;
    PUCHAR b;
    PMI_FREED_SPECIAL_POOL FreedHeader;
    LARGE_INTEGER TickCount;
    PMMPFN Pfn;

    DPRINT1("MmFreeSpecialPool(%p)\n", P);

    /* Get the PTE */
    PointerPte = MiAddressToPte(P);

    /* Check if it's valid */
    if (PointerPte->u.Hard.Valid == 0)
    {
        /* Bugcheck if it has NOACCESS or 0 set as protection */
        if (PointerPte->u.Soft.Protection == MM_NOACCESS ||
            !PointerPte->u.Soft.Protection)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, (ULONG_PTR)PointerPte, 0, 0x20);
        }
    }

    /* Determine whether this is an underrun- or an overrun-catching allocation */
    PtrOffset = (ULONG)((ULONG_PTR)P & (PAGE_SIZE - 1));
    if (PtrOffset)
    {
        /* The pool catches overruns */
        Header = PAGE_ALIGN(P);
        Overruns = TRUE;
    }
    else
    {
        /* The pool catches underruns */
        Header = (PPOOL_HEADER)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - sizeof(POOL_HEADER));
    }

    /* Check if it's nonpaged pool */
    if ((Header->Ulong1 & SPECIAL_POOL_PAGED) == 0)
    {
        /* Nonpaged allocation, ensure that IRQL is not higher than DISPATCH_LEVEL */
        ASSERT((PointerPte + 1)->u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(BAD_POOL_HEADER, Irql, (ULONG_PTR)P, 0, 0x31);
        }

        PoolType = NonPagedPool;
    }
    else
    {
        /* Paged allocation, ensure that IRQL is not higher than APC_LEVEL */
        ASSERT((PointerPte + 1)->u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE);
        if (Irql > APC_LEVEL)
        {
            KeBugCheckEx(BAD_POOL_HEADER, Irql, (ULONG_PTR)P, 1, 0x31);
        }

        PoolType = PagedPool;
    }

    /* Get the number of bytes the user requested by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;

    /* Check the memory before the user buffer when we're catching overruns */
    if (Overruns)
    {
        /* Calculate the real size of the buffer */
        BytesReal = PAGE_SIZE - PtrOffset;

        /* If they mismatch, it's unrecoverable */
        if (BytesRequested > BytesReal)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, BytesRequested, BytesReal, 0x21);
        }

        if (BytesRequested + sizeof(POOL_HEADER) < BytesReal)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, BytesRequested, BytesReal, 0x22);
        }

        /* Actually check the memory pattern */
        for (b = (PUCHAR)(Header + 1); b < (PUCHAR)P; b++)
        {
            if (Header->BlockSize != b[0])
            {
                /* Bytes mismatch */
                KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, (ULONG_PTR)b, Header->BlockSize, 0x23);
            }
        }
    }

    /* Check the memory pattern after the user buffer */
    MiSpecialPoolCheckPattern(P, Header);

    /* Fill in the freed header */
    KeQueryTickCount(&TickCount);
    FreedHeader = (PMI_FREED_SPECIAL_POOL)PAGE_ALIGN(P);
    FreedHeader->Signature = 0x98764321;
    FreedHeader->TickCount = TickCount.LowPart;
    FreedHeader->NumberOfBytesRequested = BytesRequested;
    FreedHeader->Pagable = PoolType;
    FreedHeader->VirtualAddress = P;
    FreedHeader->Thread = PsGetCurrentThread();
    /* TODO: Fill StackPointer and StackBytes */
    FreedHeader->StackPointer = NULL;
    FreedHeader->StackBytes = 0;

    if (PoolType == NonPagedPool)
    {
        /* Nonpaged: get the PFN entry corresponding to the PTE */
        Pfn = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);

        /* Lock the PFN database */
        ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Delete this PFN */
        MI_SET_PFN_DELETED(Pfn);

        /* Decrement the share count of this PFN */
        MiDecrementShareCount(Pfn, PointerPte->u.Hard.PageFrameNumber);

        /* Flush the TLB */
        //FIXME: Use KeFlushSingleTb() instead
        KeFlushEntireTb(TRUE, TRUE);
    }
    else
    {
        /* Pageable: delete that virtual address */
        MiDeleteSystemPageableVm(PointerPte, 1, 0, NULL);

        /* Lock the PFN database */
        ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }

    /* Mark the next PTE as invalid */
    PointerPte[1].u.Long = 0; //|= 8000;

    /* Make sure that the last entry is really the last one */
    ASSERT(MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);

    /* Update the current last PTE's next pointer */
    MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    /* PointerPte becomes the new last PTE */
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
    MiSpecialPoolLastPte = PointerPte;

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
}
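
/*
 * Note that the freed PTE pair is appended at the tail of the free list
 * rather than pushed back onto its head. This FIFO reuse order keeps a
 * freed virtual address unmapped for as long as possible, so a stale
 * pointer dereference is likely to fault on the still-invalid PTE instead
 * of silently landing inside a fresh allocation.
 */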

VOID
NTAPI
MiTestSpecialPool(VOID)
{
    ULONG i;
    PVOID p1, p2[100];
    //PUCHAR p3;
    ULONG ByteSize;
    POOL_TYPE PoolType = PagedPool;

    // First allocate/free
    for (i = 0; i < 100; i++)
    {
        ByteSize = (100 * (i + 1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p1 = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p1 %p size %lu\n", p1, ByteSize);
        MmFreeSpecialPool(p1);
    }

    // Now allocate all at once, then free all at once
    for (i = 0; i < 100; i++)
    {
        ByteSize = (100 * (i + 1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p2[i] = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p2[%lu] %p size %lu\n", i, p2[i], ByteSize);
    }
    for (i = 0; i < 100; i++)
    {
        DPRINT1("Freeing %p\n", p2[i]);
        MmFreeSpecialPool(p2[i]);
    }

    // Overrun the buffer to test
    //ByteSize = 16;
    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 0);
    //p3[ByteSize] = 0xF1; // This should cause an exception

    // Underrun the buffer to test
    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 1);
    //p3--;
    //*p3 = 0xF1; // This should cause an exception
}

/* EOF */