/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/special.c
 * PURPOSE:         ARM Memory Manager Special Pool implementation
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/*
    References:
    http://msdn.microsoft.com/en-us/library/ff551832(v=VS.85).aspx
*/

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

extern ULONG ExpPoolFlags;
extern PMMPTE MmSystemPteBase;

PMMPTE
NTAPI
MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
                           IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
                           IN ULONG Alignment);

/* GLOBALS ********************************************************************/

#define SPECIAL_POOL_PAGED_PTE    0x2000
#define SPECIAL_POOL_NONPAGED_PTE 0x4000
#define SPECIAL_POOL_PAGED        0x8000

PVOID MmSpecialPoolStart;
PVOID MmSpecialPoolEnd;
PVOID MiSpecialPoolExtra;
ULONG MiSpecialPoolExtraCount;

PMMPTE MiSpecialPoolFirstPte;
PMMPTE MiSpecialPoolLastPte;

PFN_COUNT MmSpecialPagesInUse;
PFN_COUNT MmSpecialPagesInUsePeak;
PFN_COUNT MiSpecialPagesPagable;
PFN_COUNT MiSpecialPagesPagablePeak;
PFN_COUNT MiSpecialPagesNonPaged;
PFN_COUNT MiSpecialPagesNonPagedPeak;
PFN_COUNT MiSpecialPagesNonPagedMaximum;

BOOLEAN MmSpecialPoolCatchOverruns = TRUE;

typedef struct _MI_FREED_SPECIAL_POOL
{
    POOL_HEADER OverlaidPoolHeader;
    /* TODO: Add overlaid verifier pool header */
    ULONG Signature;
    ULONG TickCount;
    ULONG NumberOfBytesRequested;
    BOOLEAN Pagable;
    PVOID VirtualAddress;
    PVOID StackPointer;
    ULONG StackBytes;
    PETHREAD Thread;
    UCHAR StackData[0x400];
} MI_FREED_SPECIAL_POOL, *PMI_FREED_SPECIAL_POOL;
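
/*
 * Illustrative sketch, not part of the build: after MmFreeSpecialPool() runs,
 * the beginning of the freed page is overlaid with the structure above. A
 * debugging aid could validate a suspect page roughly like this, assuming its
 * contents are still accessible (e.g. in a crash dump):
 *
 *     PMI_FREED_SPECIAL_POOL FreedHeader = (PMI_FREED_SPECIAL_POOL)PAGE_ALIGN(P);
 *     if (FreedHeader->Signature == 0x98764321)
 *     {
 *         DPRINT1("Freed at tick %lu by thread %p, %lu bytes requested\n",
 *                 FreedHeader->TickCount,
 *                 FreedHeader->Thread,
 *                 FreedHeader->NumberOfBytesRequested);
 *     }
 *
 * The 0x98764321 signature matches the value written by MmFreeSpecialPool()
 * below.
 */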

/* PRIVATE FUNCTIONS **********************************************************/

VOID NTAPI MiTestSpecialPool(VOID);

BOOLEAN
NTAPI
MmUseSpecialPool(SIZE_T NumberOfBytes, ULONG Tag)
{
    /* Special pool cannot handle allocations bigger than a page minus the pool header */
    if (NumberOfBytes > (PAGE_SIZE - sizeof(POOL_HEADER)))
    {
        return FALSE;
    }

    if (MmSpecialPoolTag == '*')
    {
        return TRUE;
    }

    return Tag == MmSpecialPoolTag;
}
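
/*
 * Illustrative sketch, not part of the build: the executive pool allocator
 * consults MmUseSpecialPool() when POOL_FLAG_SPECIAL_POOL is set in
 * ExpPoolFlags, roughly along these lines:
 *
 *     if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
 *     {
 *         if (MmUseSpecialPool(NumberOfBytes, Tag))
 *         {
 *             Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 0);
 *             if (Entry) return Entry;
 *         }
 *     }
 *
 * A tag of '*' sends every allocation that fits into special pool; otherwise
 * only allocations whose tag equals MmSpecialPoolTag are redirected.
 */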

BOOLEAN
NTAPI
MmIsSpecialPoolAddress(PVOID P)
{
    return ((P >= MmSpecialPoolStart) &&
            (P <= MmSpecialPoolEnd));
}

BOOLEAN
NTAPI
MmIsSpecialPoolAddressFree(PVOID P)
{
    PMMPTE PointerPte;

    ASSERT(MmIsSpecialPoolAddress(P));
    PointerPte = MiAddressToPte(P);

    if (PointerPte->u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE ||
        PointerPte->u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE)
    {
        /* Guard page PTE */
        return FALSE;
    }

    /* Free PTE */
    return TRUE;
}
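
/*
 * Illustrative sketch, not part of the build: every special pool allocation
 * consumes a pair of PTEs. The first maps the data page; the second stays
 * invalid and acts as a guard page, with its PageFileHigh field stamped so
 * the pool type can be recovered later:
 *
 *     [ PTE n     ]  valid, maps the allocated physical page
 *     [ PTE n + 1 ]  invalid, u.Soft.PageFileHigh = SPECIAL_POOL_PAGED_PTE
 *                    or SPECIAL_POOL_NONPAGED_PTE
 *
 * Any access to an unmapped neighboring page faults immediately; that is how
 * overruns past the end of the data page (and, when the buffer is placed at
 * the start of the page, underruns into the preceding page) are caught.
 */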

VOID
NTAPI
MiInitializeSpecialPool(VOID)
{
    ULONG SpecialPoolPtes, i;
    PMMPTE PointerPte;

    /* Check if there is a special pool tag */
    if ((MmSpecialPoolTag == 0) ||
        (MmSpecialPoolTag == -1)) return;

    /* Calculate number of system PTEs for the special pool */
    if (MmNumberOfSystemPtes >= 0x3000)
        SpecialPoolPtes = MmNumberOfSystemPtes / 3;
    else
        SpecialPoolPtes = MmNumberOfSystemPtes / 6;

    /* Don't let the number go too high */
    if (SpecialPoolPtes > 0x6000) SpecialPoolPtes = 0x6000;

    /* Round up to the page size */
    SpecialPoolPtes = PAGE_ROUND_UP(SpecialPoolPtes);

    ASSERT((SpecialPoolPtes & (PTE_PER_PAGE - 1)) == 0);

    /* Reserve those PTEs */
    do
    {
        PointerPte = MiReserveAlignedSystemPtes(SpecialPoolPtes,
                                                SystemPteSpace,
                                                /*0x400000*/0); // FIXME:
        if (PointerPte) break;

        /* Reserving didn't work, so try to reduce the requested size */
        ASSERT(SpecialPoolPtes >= PTE_PER_PAGE);
        SpecialPoolPtes -= PTE_PER_PAGE;
    } while (SpecialPoolPtes);

    /* Fail if we couldn't reserve them at all */
    if (!SpecialPoolPtes) return;

    /* Make sure we got enough */
    ASSERT(SpecialPoolPtes >= PTE_PER_PAGE);

    /* Save first PTE and its address */
    MiSpecialPoolFirstPte = PointerPte;
    MmSpecialPoolStart = MiPteToAddress(PointerPte);

    for (i = 0; i < PTE_PER_PAGE / 2; i++)
    {
        /* Point it to the next entry */
        PointerPte->u.List.NextEntry = &PointerPte[2] - MmSystemPteBase;

        /* Move to the next pair */
        PointerPte += 2;
    }

    /* Save extra values */
    MiSpecialPoolExtra = PointerPte;
    MiSpecialPoolExtraCount = SpecialPoolPtes - PTE_PER_PAGE;

    /* Mark the previous PTE as the last one */
    MiSpecialPoolLastPte = PointerPte - 2;
    MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    /* Save end address of the special pool */
    MmSpecialPoolEnd = MiPteToAddress(MiSpecialPoolLastPte + 1);

    /* Calculate the maximum non-paged share of the special pool */
    MiSpecialPagesNonPagedMaximum = MmResidentAvailablePages >> 4;

    /* And raise it on machines with plenty of physical pages */
    if (MmNumberOfPhysicalPages > 0x3FFF)
        MiSpecialPagesNonPagedMaximum = MmResidentAvailablePages >> 3;

    DPRINT1("Special pool start %p - end %p\n", MmSpecialPoolStart, MmSpecialPoolEnd);
    ExpPoolFlags |= POOL_FLAG_SPECIAL_POOL;

    //MiTestSpecialPool();
}
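
/*
 * Illustrative sketch, not part of the build: the initialization above
 * threads every other PTE into a singly linked free list, so each node is
 * the data PTE of a (data, guard) pair. With PTE_PER_PAGE being e.g. 1024,
 * the first chunk ends up like this:
 *
 *     PTE[0]    -> PTE[2]      (NextEntry is encoded as an offset from
 *     PTE[2]    -> PTE[4]       MmSystemPteBase; odd PTEs are the guards)
 *     ...
 *     PTE[1022] -> MM_EMPTY_PTE_LIST
 *
 * The rest of the reserved PTEs are parked at MiSpecialPoolExtra and linked
 * in page-sized chunks on demand by MmExpandSpecialPool() below.
 */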

NTSTATUS
NTAPI
MmExpandSpecialPool(VOID)
{
    ULONG i;
    PMMPTE PointerPte;

    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    if (MiSpecialPoolExtraCount == 0)
        return STATUS_INSUFFICIENT_RESOURCES;

    PointerPte = MiSpecialPoolExtra;
    ASSERT(MiSpecialPoolFirstPte == MiSpecialPoolLastPte);
    ASSERT(MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);
    MiSpecialPoolFirstPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    ASSERT(MiSpecialPoolExtraCount >= PTE_PER_PAGE);
    for (i = 0; i < PTE_PER_PAGE / 2; i++)
    {
        /* Point it to the next entry */
        PointerPte->u.List.NextEntry = &PointerPte[2] - MmSystemPteBase;

        /* Move to the next pair */
        PointerPte += 2;
    }

    /* Save remaining extra values */
    MiSpecialPoolExtra = PointerPte;
    MiSpecialPoolExtraCount -= PTE_PER_PAGE;

    /* Mark the previous PTE as the last one */
    MiSpecialPoolLastPte = PointerPte - 2;
    MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    /* Save new end address of the special pool */
    MmSpecialPoolEnd = MiPteToAddress(MiSpecialPoolLastPte + 1);

    return STATUS_SUCCESS;
}

PVOID
NTAPI
MmAllocateSpecialPool(SIZE_T NumberOfBytes, ULONG Tag, POOL_TYPE PoolType, ULONG SpecialType)
{
    KIRQL Irql;
    MMPTE TempPte = ValidKernelPte;
    PMMPTE PointerPte;
    PFN_NUMBER PageFrameNumber;
    LARGE_INTEGER TickCount;
    PVOID Entry;
    PPOOL_HEADER Header;
    PFN_COUNT PagesInUse;

    DPRINT("MmAllocateSpecialPool(%x %x %x %x)\n", NumberOfBytes, Tag, PoolType, SpecialType);

    /* Check if the pool is initialized and quit if it's not */
    if (!MiSpecialPoolFirstPte) return NULL;

    /* Get the pool type */
    PoolType &= BASE_POOL_TYPE_MASK;

    /* Check whether current IRQL matches the pool type */
    Irql = KeGetCurrentIrql();

    if (((PoolType == PagedPool) && (Irql > APC_LEVEL)) ||
        ((PoolType != PagedPool) && (Irql > DISPATCH_LEVEL)))
    {
        /* Bad caller */
        KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                     Irql,
                     PoolType,
                     NumberOfBytes,
                     0x30);
    }

    /* TODO: Take into account various limitations */

    /* Heed the maximum limit of nonpaged pages */
    if ((PoolType == NonPagedPool) &&
        (MiSpecialPagesNonPaged > MiSpecialPagesNonPagedMaximum))
    {
        return NULL;
    }

    /* Lock PFN database */
    Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Reject allocation in case amount of available pages is too small */
    if (MmAvailablePages < 0x100)
    {
        /* Release the PFN database lock */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
        DPRINT1("Special pool: MmAvailablePages 0x%x is too small\n", MmAvailablePages);
        return NULL;
    }

    /* Check if special pool PTE list is exhausted */
    if (MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST)
    {
        /* Try to expand it */
        if (!NT_SUCCESS(MmExpandSpecialPool()))
        {
            /* No reserves left, reject this allocation */
            static int once;
            KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
            if (!once++) DPRINT1("Special pool: No PTEs left!\n");
            return NULL;
        }
        ASSERT(MiSpecialPoolFirstPte->u.List.NextEntry != MM_EMPTY_PTE_LIST);
    }

    /* Save allocation time */
    KeQueryTickCount(&TickCount);

    /* Get a pointer to the first PTE */
    PointerPte = MiSpecialPoolFirstPte;

    /* Set the first PTE pointer to the next one in the list */
    MiSpecialPoolFirstPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

    /* Allocate a physical page */
    PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

    /* Initialize PFN and make it valid */
    TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
    MiInitializePfnAndMakePteValid(PageFrameNumber, PointerPte, TempPte);

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);

    /* Increase page counter */
    PagesInUse = InterlockedIncrementUL(&MmSpecialPagesInUse);
    if (PagesInUse > MmSpecialPagesInUsePeak)
        MmSpecialPagesInUsePeak = PagesInUse;

    /* Fill the page with a recognizable pattern; the low byte of the tick count will do */
    Entry = MiPteToAddress(PointerPte);
    RtlFillMemory(Entry, PAGE_SIZE, TickCount.LowPart);

    /* Calculate header and entry addresses */
    if ((SpecialType != 0) &&
        ((SpecialType == 1) || (!MmSpecialPoolCatchOverruns)))
    {
        /* We catch underruns. Data is at the beginning of the page */
        Header = (PPOOL_HEADER)((PUCHAR)Entry + PAGE_SIZE - sizeof(POOL_HEADER));
    }
    else
    {
        /* We catch overruns. Data is at the end of the page */
        Header = (PPOOL_HEADER)Entry;
        Entry = (PVOID)((ULONG_PTR)((PUCHAR)Entry - NumberOfBytes + PAGE_SIZE) & ~((LONG_PTR)sizeof(POOL_HEADER) - 1));
    }
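
    /*
     * Illustrative worked example, assuming PAGE_SIZE == 0x1000 and an
     * 8-byte POOL_HEADER: for a 24-byte overrun-catching allocation on the
     * page at 0xF0000000,
     *
     *     Header = 0xF0000000
     *     Entry  = (0xF0000000 + 0x1000 - 24) & ~7 = 0xF0000FE8
     *
     * so the buffer ends exactly at the page boundary and the first write
     * past it lands on the guard page. Rounding Entry down to header
     * alignment can leave up to sizeof(POOL_HEADER) - 1 slop bytes after
     * the buffer; they keep the fill pattern and are verified on free by
     * MiSpecialPoolCheckPattern().
     */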

    /* Initialize the header */
    RtlZeroMemory(Header, sizeof(POOL_HEADER));

    /* Save allocation size there */
    Header->Ulong1 = (ULONG)NumberOfBytes;

    /* Make sure it's all good */
    ASSERT((NumberOfBytes <= PAGE_SIZE - sizeof(POOL_HEADER)) &&
           (PAGE_SIZE <= 32 * 1024));

    /* Mark it as paged or nonpaged */
    if (PoolType == PagedPool)
    {
        /* Add pagedpool flag into the pool header too */
        Header->Ulong1 |= SPECIAL_POOL_PAGED;

        /* Also mark the next PTE as special-pool-paged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_PAGED_PTE;

        /* Increase pagable counter */
        PagesInUse = InterlockedIncrementUL(&MiSpecialPagesPagable);
        if (PagesInUse > MiSpecialPagesPagablePeak)
            MiSpecialPagesPagablePeak = PagesInUse;
    }
    else
    {
        /* Mark the next PTE as special-pool-nonpaged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_NONPAGED_PTE;

        /* Increase nonpaged counter */
        PagesInUse = InterlockedIncrementUL(&MiSpecialPagesNonPaged);
        if (PagesInUse > MiSpecialPagesNonPagedPeak)
            MiSpecialPagesNonPagedPeak = PagesInUse;
    }

    /* Finally, save the tag and stash the allocation time in the header's
       BlockSize field. That byte seeded the fill pattern above and will be
       used to check memory consistency within the allocated page. */
    Header->PoolTag = Tag;
    Header->BlockSize = (UCHAR)TickCount.LowPart;
    DPRINT("%p\n", Entry);
    return Entry;
}
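
/*
 * Illustrative note, not part of the build: with special pool active, an
 * ordinary allocation whose tag matches MmSpecialPoolTag is transparently
 * redirected here, e.g.:
 *
 *     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 24, 'TEST');
 *     ((PUCHAR)Buffer)[24] = 0;  // overrun: faults on the guard page
 *     ExFreePoolWithTag(Buffer, 'TEST');
 *
 * Per the MSDN article referenced at the top of this file, special pool is
 * typically requested through Driver Verifier or by setting the PoolTag
 * (and PoolTagOverruns) values under HKLM\SYSTEM\CurrentControlSet\Control\
 * Session Manager\Memory Management.
 */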

VOID
NTAPI
MiSpecialPoolCheckPattern(PUCHAR P, PPOOL_HEADER Header)
{
    ULONG BytesToCheck, BytesRequested, Index;
    PUCHAR Ptr;

    /* Get the number of bytes the user requested by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;
    ASSERT(BytesRequested <= PAGE_SIZE - sizeof(POOL_HEADER));

    /* Get a pointer to the end of the user's area */
    Ptr = P + BytesRequested;

    /* Calculate how many bytes to check */
    BytesToCheck = (ULONG)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - Ptr);

    /* Remove the pool header size if we're catching underruns */
    if (((ULONG_PTR)P & (PAGE_SIZE - 1)) == 0)
    {
        /* The user buffer is located at the beginning of the page */
        BytesToCheck -= sizeof(POOL_HEADER);
    }

    /* Check the pattern after the user buffer */
    for (Index = 0; Index < BytesToCheck; Index++)
    {
        /* Bugcheck if the bytes don't match */
        if (Ptr[Index] != Header->BlockSize)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         (ULONG_PTR)&Ptr[Index],
                         Header->BlockSize,
                         0x24);
        }
    }
}
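
/*
 * Illustrative worked example, assuming PAGE_SIZE == 0x1000 and an 8-byte
 * POOL_HEADER: for an overrun-catching allocation of 24 bytes at
 * P == 0xF0000FE8, Ptr lands exactly on the page boundary and BytesToCheck
 * is 0, so there is no slop to verify. For a 20-byte allocation, P rounds
 * down to 0xF0000FE8 as well, Ptr is 0xF0000FFC, and the 4 slop bytes up to
 * the page end must still hold the fill byte (Header->BlockSize); otherwise
 * bugcheck 0xC1 (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION) is raised with
 * parameter 4 set to 0x24.
 */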

VOID
NTAPI
MmFreeSpecialPool(PVOID P)
{
    PMMPTE PointerPte;
    PPOOL_HEADER Header;
    BOOLEAN Overruns = FALSE;
    KIRQL Irql = KeGetCurrentIrql();
    POOL_TYPE PoolType;
    ULONG BytesRequested, BytesReal = 0;
    ULONG PtrOffset;
    PUCHAR b;
    PMI_FREED_SPECIAL_POOL FreedHeader;
    LARGE_INTEGER TickCount;
    PMMPFN Pfn;

    DPRINT("MmFreeSpecialPool(%p)\n", P);

    /* Get the PTE */
    PointerPte = MiAddressToPte(P);

    /* Check if it's valid */
    if (PointerPte->u.Hard.Valid == 0)
    {
        /* Bugcheck if it has NOACCESS or 0 set as protection */
        if (PointerPte->u.Soft.Protection == MM_NOACCESS ||
            !PointerPte->u.Soft.Protection)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         (ULONG_PTR)PointerPte,
                         0,
                         0x20);
        }
    }

    /* Determine whether this is an underrun- or overrun-catching pool pointer */
    PtrOffset = (ULONG)((ULONG_PTR)P & (PAGE_SIZE - 1));
    if (PtrOffset)
    {
        /* Pool catches overruns */
        Header = PAGE_ALIGN(P);
        Overruns = TRUE;
    }
    else
    {
        /* Pool catches underruns */
        Header = (PPOOL_HEADER)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - sizeof(POOL_HEADER));
    }

    /* Check if it's non paged pool */
    if ((Header->Ulong1 & SPECIAL_POOL_PAGED) == 0)
    {
        /* Non-paged allocation; ensure that the IRQL is not higher than DISPATCH */
        PoolType = NonPagedPool;
        ASSERT(PointerPte[1].u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         Irql,
                         PoolType,
                         (ULONG_PTR)P,
                         0x31);
        }
    }
    else
    {
        /* Paged allocation; ensure that the IRQL is not higher than APC */
        PoolType = PagedPool;
        ASSERT(PointerPte[1].u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE);
        if (Irql > APC_LEVEL)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         Irql,
                         PoolType,
                         (ULONG_PTR)P,
                         0x31);
        }
    }

    /* Get the number of bytes the user requested by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;
    ASSERT(BytesRequested <= PAGE_SIZE - sizeof(POOL_HEADER));

    /* Check the memory before the user buffer when catching overruns */
    if (Overruns)
    {
        /* Calculate the real placement of the buffer */
        BytesReal = PAGE_SIZE - PtrOffset;

        /* If they mismatch, it's unrecoverable */
        if (BytesRequested > BytesReal)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         BytesRequested,
                         BytesReal,
                         0x21);
        }

        if (BytesRequested + sizeof(POOL_HEADER) < BytesReal)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         BytesRequested,
                         BytesReal,
                         0x22);
        }

        /* Actually check the memory pattern */
        for (b = (PUCHAR)(Header + 1); b < (PUCHAR)P; b++)
        {
            if (*b != Header->BlockSize)
            {
                /* Bytes mismatch */
                KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                             (ULONG_PTR)P,
                             (ULONG_PTR)b,
                             Header->BlockSize,
                             0x23);
            }
        }
    }

    /* Check the memory pattern after the user buffer */
    MiSpecialPoolCheckPattern(P, Header);

    /* Fill the freed header */
    KeQueryTickCount(&TickCount);
    FreedHeader = (PMI_FREED_SPECIAL_POOL)PAGE_ALIGN(P);
    FreedHeader->Signature = 0x98764321;
    FreedHeader->TickCount = TickCount.LowPart;
    FreedHeader->NumberOfBytesRequested = BytesRequested;
    FreedHeader->Pagable = PoolType;
    FreedHeader->VirtualAddress = P;
    FreedHeader->Thread = PsGetCurrentThread();
    /* TODO: Fill StackPointer and StackBytes */
    FreedHeader->StackPointer = NULL;
    FreedHeader->StackBytes = 0;

    if (PoolType == NonPagedPool)
    {
        /* Not pagable. Get the PFN element corresponding to the PTE */
        Pfn = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);

        /* Count the page as free */
        InterlockedDecrementUL(&MiSpecialPagesNonPaged);

        /* Lock the PFN database */
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Delete this PFN */
        MI_SET_PFN_DELETED(Pfn);

        /* Decrement the share count of this PFN */
        MiDecrementShareCount(Pfn, PointerPte->u.Hard.PageFrameNumber);

        MI_ERASE_PTE(PointerPte);

        /* Flush the TLB */
        //FIXME: Use KeFlushSingleTb() instead
        KeFlushEntireTb(TRUE, TRUE);
    }
    else
    {
        /* Pagable. Delete that virtual address */
        MiDeleteSystemPageableVm(PointerPte, 1, 0, NULL);

        /* Count the page as free */
        InterlockedDecrementUL(&MiSpecialPagesPagable);

        /* Lock the PFN database */
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }

    /* Mark the next PTE as invalid */
    MI_ERASE_PTE(PointerPte + 1);

    /* Make sure that the last entry is really the last one */
    ASSERT(MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);

    /* Update the current last PTE's next pointer */
    MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    /* PointerPte becomes the new last PTE */
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
    MiSpecialPoolLastPte = PointerPte;

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);

    /* Update the page counter */
    InterlockedDecrementUL(&MmSpecialPagesInUse);
}
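
/*
 * Design note, illustrative and not part of the build: freed PTE pairs are
 * appended at the tail of the list while allocations are taken from the
 * head, so a just-freed virtual address is reused as late as possible. That
 * maximizes the window in which a use-after-free faults on the erased PTE:
 *
 *     PVOID p = MmAllocateSpecialPool(16, 'TEST', NonPagedPool, 0);
 *     MmFreeSpecialPool(p);
 *     *(PUCHAR)p = 0;  // the PTE was just erased: this access faults
 */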

VOID
NTAPI
MiTestSpecialPool(VOID)
{
    ULONG i;
    PVOID p1, p2[100];
    //PUCHAR p3;
    ULONG ByteSize;
    POOL_TYPE PoolType = PagedPool;

    // First allocate/free
    for (i = 0; i < 100; i++)
    {
        ByteSize = (100 * (i + 1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p1 = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p1 %p size %lu\n", p1, ByteSize);
        MmFreeSpecialPool(p1);
    }

    // Now allocate all at once, then free at once
    for (i = 0; i < 100; i++)
    {
        ByteSize = (100 * (i + 1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p2[i] = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p2[%lu] %p size %lu\n", i, p2[i], ByteSize);
    }
    for (i = 0; i < 100; i++)
    {
        DPRINT1("Freeing %p\n", p2[i]);
        MmFreeSpecialPool(p2[i]);
    }

    // Overrun the buffer to test
    //ByteSize = 16;
    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 0);
    //p3[ByteSize] = 0xF1; // This should cause an exception

    // Underrun the buffer to test
    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 1);
    //p3--;
    //*p3 = 0xF1; // This should cause an exception

}

/* EOF */