/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/special.c
 * PURPOSE:         ARM Memory Manager Special Pool implementation
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/*
    References:
    http://msdn.microsoft.com/en-us/library/ff551832(v=VS.85).aspx
*/

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

extern ULONG ExpPoolFlags;
extern PMMPTE MmSystemPteBase;

PMMPTE
NTAPI
MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
                           IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
                           IN ULONG Alignment);

/* GLOBALS ********************************************************************/

#define SPECIAL_POOL_PAGED_PTE    0x2000
#define SPECIAL_POOL_NONPAGED_PTE 0x4000
#define SPECIAL_POOL_PAGED        0x8000

PVOID MmSpecialPoolStart;
PVOID MmSpecialPoolEnd;
PVOID MiSpecialPoolExtra;
ULONG MiSpecialPoolExtraCount;

PMMPTE MiSpecialPoolFirstPte;
PMMPTE MiSpecialPoolLastPte;

PFN_NUMBER MiSpecialPagesNonPagedMaximum;

BOOLEAN MmSpecialPoolCatchOverruns = TRUE;

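/*
 * Debugging header overlaid on a page after it has been freed. It records
 * the tag, requested size, time of free and the freeing thread; the stack
 * snapshot fields are not filled in yet (see the TODO in MmFreeSpecialPool).
 */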
typedef struct _MI_FREED_SPECIAL_POOL
{
    POOL_HEADER OverlaidPoolHeader;
    /* TODO: Add overlaid verifier pool header */
    ULONG Signature;
    ULONG TickCount;
    ULONG NumberOfBytesRequested;
    BOOLEAN Pagable;
    PVOID VirtualAddress;
    PVOID StackPointer;
    ULONG StackBytes;
    PETHREAD Thread;
    UCHAR StackData[0x400];
} MI_FREED_SPECIAL_POOL, *PMI_FREED_SPECIAL_POOL;

/* PRIVATE FUNCTIONS **********************************************************/

VOID NTAPI MiTestSpecialPool(VOID);

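/*
 * Decides whether a pool allocation should be diverted into the special
 * pool: the request must fit into a single page together with a POOL_HEADER,
 * and its tag must match the configured MmSpecialPoolTag.
 */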
BOOLEAN
NTAPI
MmUseSpecialPool(SIZE_T NumberOfBytes, ULONG Tag)
{
    /* Special pool is not suitable for allocations bigger than 1 page */
    if (NumberOfBytes > (PAGE_SIZE - sizeof(POOL_HEADER)))
        return FALSE;

    return Tag == MmSpecialPoolTag;
}

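/*
 * Checks whether a pointer lies within the virtual address range reserved
 * for the special pool.
 */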
BOOLEAN
NTAPI
MmIsSpecialPoolAddress(PVOID P)
{
    return ((P >= MmSpecialPoolStart) &&
            (P <= MmSpecialPoolEnd));
}

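/*
 * Reports whether a special pool address is currently free. A PTE carrying
 * one of the special pool markers is the guard PTE of a live allocation,
 * so the address is considered in use; any other PTE state means free.
 */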
BOOLEAN
NTAPI
MmIsSpecialPoolAddressFree(PVOID P)
{
    PMMPTE PointerPte;

    ASSERT(MmIsSpecialPoolAddress(P));
    PointerPte = MiAddressToPte(P);

    if (PointerPte->u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE ||
        PointerPte->u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE)
    {
        /* Guard page PTE */
        return FALSE;
    }

    /* Free PTE */
    return TRUE;
}

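/*
 * Reserves a chunk of system PTEs for the special pool and links them into
 * a free list of PTE pairs (one data page plus one guard page per pair).
 * PTEs beyond the first page's worth are kept as an extra reserve that
 * MmExpandSpecialPool can draw from later.
 */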
VOID
NTAPI
MiInitializeSpecialPool(VOID)
{
    ULONG SpecialPoolPtes, i;
    PMMPTE PointerPte;

    /* Check if there is a special pool tag */
    if ((MmSpecialPoolTag == 0) ||
        (MmSpecialPoolTag == -1)) return;

    /* Calculate the number of system PTEs for the special pool */
    if (MmNumberOfSystemPtes >= 0x3000)
        SpecialPoolPtes = MmNumberOfSystemPtes / 3;
    else
        SpecialPoolPtes = MmNumberOfSystemPtes / 6;

    /* Don't let the number go too high */
    if (SpecialPoolPtes > 0x6000) SpecialPoolPtes = 0x6000;

    /* Round up to the page size */
    SpecialPoolPtes = PAGE_ROUND_UP(SpecialPoolPtes);

    ASSERT((SpecialPoolPtes & (PTE_PER_PAGE - 1)) == 0);

    /* Reserve those PTEs */
    do
    {
        PointerPte = MiReserveAlignedSystemPtes(SpecialPoolPtes, 0, /*0x400000*/0); // FIXME:
        if (PointerPte) break;

        /* Reserving didn't work, so try to reduce the requested size */
        ASSERT(SpecialPoolPtes >= PTE_PER_PAGE);
        SpecialPoolPtes -= PTE_PER_PAGE;
    } while (SpecialPoolPtes);

    /* Fail if we couldn't reserve them at all */
    if (!SpecialPoolPtes) return;

    /* Make sure we got enough */
    ASSERT(SpecialPoolPtes >= PTE_PER_PAGE);

    /* Save the first PTE and its address */
    MiSpecialPoolFirstPte = PointerPte;
    MmSpecialPoolStart = MiPteToAddress(PointerPte);

    for (i = 0; i < PTE_PER_PAGE / 2; i++)
    {
        /* Point it to the next entry */
        PointerPte->u.List.NextEntry = &PointerPte[2] - MmSystemPteBase;

        /* Move to the next pair */
        PointerPte += 2;
    }

    /* Save extra values */
    MiSpecialPoolExtra = PointerPte;
    MiSpecialPoolExtraCount = SpecialPoolPtes - PTE_PER_PAGE;

    /* Mark the previous PTE as the last one */
    MiSpecialPoolLastPte = PointerPte - 2;
    MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    /* Save the end address of the special pool */
    MmSpecialPoolEnd = MiPteToAddress(MiSpecialPoolLastPte + 1);

    /* Calculate the maximum non-paged part of the special pool */
    MiSpecialPagesNonPagedMaximum = MmResidentAvailablePages >> 4;

    /* And raise the limit on systems with plenty of physical pages */
    if (MmNumberOfPhysicalPages > 0x3FFF)
        MiSpecialPagesNonPagedMaximum = MmResidentAvailablePages >> 3;

    DPRINT1("Special pool start %p - end %p\n", MmSpecialPoolStart, MmSpecialPoolEnd);
    ExpPoolFlags |= POOL_FLAG_SPECIAL_POOL;

    //MiTestSpecialPool();
}

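/*
 * Links one more page's worth of PTE pairs from the extra reserve into the
 * free list. Expected to run at DISPATCH_LEVEL with the PFN lock held, when
 * the allocator finds the list exhausted.
 */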
NTSTATUS
NTAPI
MmExpandSpecialPool(VOID)
{
    ULONG i;
    PMMPTE PointerPte;

    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    if (MiSpecialPoolExtraCount == 0)
        return STATUS_INSUFFICIENT_RESOURCES;

    PointerPte = MiSpecialPoolExtra;
    ASSERT(MiSpecialPoolFirstPte == MiSpecialPoolLastPte);
    ASSERT(MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);
    MiSpecialPoolFirstPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    ASSERT(MiSpecialPoolExtraCount >= PTE_PER_PAGE);
    for (i = 0; i < PTE_PER_PAGE / 2; i++)
    {
        /* Point it to the next entry */
        PointerPte->u.List.NextEntry = &PointerPte[2] - MmSystemPteBase;

        /* Move to the next pair */
        PointerPte += 2;
    }

    /* Save the remaining extra values */
    MiSpecialPoolExtra = PointerPte;
    MiSpecialPoolExtraCount -= PTE_PER_PAGE;

    /* Mark the previous PTE as the last one */
    MiSpecialPoolLastPte = PointerPte - 2;
    MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    /* Save the new end address of the special pool */
    MmSpecialPoolEnd = MiPteToAddress(MiSpecialPoolLastPte + 1);

    return STATUS_SUCCESS;
}

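/*
 * Allocates one special pool page. When catching overruns (the default)
 * the page is laid out as
 *
 *     | POOL_HEADER | fill pattern | user buffer || unmapped guard page |
 *
 * so a write past the buffer faults immediately. When catching underruns
 * the user buffer sits at the start of the page and the header at its end.
 * The unused bytes are filled with the low byte of the tick count, which
 * is verified when the allocation is freed.
 */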
PVOID
NTAPI
MmAllocateSpecialPool(SIZE_T NumberOfBytes, ULONG Tag, POOL_TYPE PoolType, ULONG SpecialType)
{
    KIRQL Irql;
    MMPTE TempPte = ValidKernelPte;
    PMMPTE PointerPte;
    PFN_NUMBER PageFrameNumber;
    LARGE_INTEGER TickCount;
    PVOID Entry;
    PPOOL_HEADER Header;

    DPRINT("MmAllocateSpecialPool(%x %x %x %x)\n", NumberOfBytes, Tag, PoolType, SpecialType);

    /* Check if the pool is initialized and quit if it's not */
    if (!MiSpecialPoolFirstPte) return NULL;

    /* Get the pool type */
    PoolType &= BASE_POOL_TYPE_MASK;

    /* Check whether the current IRQL matches the pool type */
    Irql = KeGetCurrentIrql();

    if (((PoolType == PagedPool) && (Irql > APC_LEVEL)) ||
        ((PoolType != PagedPool) && (Irql > DISPATCH_LEVEL)))
    {
        /* Bad caller */
        KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                     Irql,
                     PoolType,
                     NumberOfBytes,
                     0x30);
    }

    /* TODO: Take into account various limitations */
    /*if ((PoolType != NonPagedPool) &&
        MiSpecialPagesNonPaged > MiSpecialPagesNonPagedMaximum)*/

    /* Lock the PFN database */
    Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Reject the allocation if the number of available pages is too small */
    if (MmAvailablePages < 0x100)
    {
        /* Release the PFN database lock */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
        DPRINT1("Special pool: MmAvailablePages 0x%x is too small\n", MmAvailablePages);
        return NULL;
    }

    /* Check if the special pool PTE list is exhausted */
    if (MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST)
    {
        /* Try to expand it */
        if (!NT_SUCCESS(MmExpandSpecialPool()))
        {
            /* No reserves left, reject this allocation */
            static int once;
            KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
            if (!once++) DPRINT1("Special pool: No PTEs left!\n");
            return NULL;
        }
        ASSERT(MiSpecialPoolFirstPte->u.List.NextEntry != MM_EMPTY_PTE_LIST);
    }

    /* Save the allocation time */
    KeQueryTickCount(&TickCount);

    /* Get a pointer to the first PTE */
    PointerPte = MiSpecialPoolFirstPte;

    /* Set the first PTE pointer to the next one in the list */
    MiSpecialPoolFirstPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

    /* Allocate a physical page */
    PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

    /* Initialize the PFN and make the PTE valid */
    TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
    MiInitializePfnAndMakePteValid(PageFrameNumber, PointerPte, TempPte);

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);

    /* Fill the page with a pattern. The low byte of the tick count will do */
    Entry = MiPteToAddress(PointerPte);
    RtlFillMemory(Entry, PAGE_SIZE, TickCount.LowPart);

    /* Calculate the header and entry addresses */
    if ((SpecialType != 0) &&
        ((SpecialType == 1) || (!MmSpecialPoolCatchOverruns)))
    {
        /* We catch underruns. Data is at the beginning of the page */
        Header = (PPOOL_HEADER)((PUCHAR)Entry + PAGE_SIZE - sizeof(POOL_HEADER));
    }
    else
    {
        /* We catch overruns. Data is at the end of the page */
        Header = (PPOOL_HEADER)Entry;
        Entry = (PVOID)((ULONG_PTR)((PUCHAR)Entry - NumberOfBytes + PAGE_SIZE) & ~((LONG_PTR)sizeof(POOL_HEADER) - 1));
    }

    /* Initialize the header */
    RtlZeroMemory(Header, sizeof(POOL_HEADER));

    /* Save the allocation size there */
    Header->Ulong1 = (ULONG)NumberOfBytes;

    /* Make sure it's all good */
    ASSERT((NumberOfBytes <= PAGE_SIZE - sizeof(POOL_HEADER)) &&
           (PAGE_SIZE <= 32 * 1024));

    /* Mark it as paged or nonpaged */
    if (PoolType == PagedPool)
    {
        /* Add the pagedpool flag to the pool header too */
        Header->Ulong1 |= SPECIAL_POOL_PAGED;

        /* Also mark the next PTE as special-pool-paged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_PAGED_PTE;
    }
    else
    {
        /* Mark the next PTE as special-pool-nonpaged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_NONPAGED_PTE;
    }

    /* Finally save the tag and put the allocation time into the header's
       blocksize. That time will be used to check memory consistency within
       the allocated page. */
    Header->PoolTag = Tag;
    Header->BlockSize = (UCHAR)TickCount.LowPart;
    DPRINT("%p\n", Entry);
    return Entry;
}

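/*
 * Validates the fill pattern between the end of the user buffer and the end
 * of the page (minus the header when catching underruns), bugchecking on
 * the first byte that no longer matches the pattern stored in BlockSize.
 */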
VOID
NTAPI
MiSpecialPoolCheckPattern(PUCHAR P, PPOOL_HEADER Header)
{
    ULONG BytesToCheck, BytesRequested, Index;
    PUCHAR Ptr;

    /* Get the number of bytes the user requested, clearing out the paged flag */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;
    ASSERT(BytesRequested <= PAGE_SIZE - sizeof(POOL_HEADER));

    /* Get a pointer to the end of the user's area */
    Ptr = P + BytesRequested;

    /* Calculate how many bytes to check */
    BytesToCheck = (ULONG)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - Ptr);

    /* Remove the pool header size if we're catching underruns */
    if (((ULONG_PTR)P & (PAGE_SIZE - 1)) == 0)
    {
        /* The user buffer is located at the beginning of the page */
        BytesToCheck -= sizeof(POOL_HEADER);
    }

    /* Check the pattern after the user buffer */
    for (Index = 0; Index < BytesToCheck; Index++)
    {
        /* Bugcheck if the bytes don't match */
        if (Ptr[Index] != Header->BlockSize)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         (ULONG_PTR)&Ptr[Index],
                         Header->BlockSize,
                         0x24);
        }
    }
}

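/*
 * Frees a special pool allocation. The surrounding fill pattern is verified
 * first, then a MI_FREED_SPECIAL_POOL record is written over the page for
 * later debugging, the page is unmapped, and the PTE pair is appended to
 * the tail of the free list so that the address is reused as late as
 * possible (helping to catch use-after-free accesses).
 */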
VOID
NTAPI
MmFreeSpecialPool(PVOID P)
{
    PMMPTE PointerPte;
    PPOOL_HEADER Header;
    BOOLEAN Overruns = FALSE;
    KIRQL Irql = KeGetCurrentIrql();
    POOL_TYPE PoolType;
    ULONG BytesRequested, BytesReal = 0;
    ULONG PtrOffset;
    PUCHAR b;
    PMI_FREED_SPECIAL_POOL FreedHeader;
    LARGE_INTEGER TickCount;
    PMMPFN Pfn;

    DPRINT("MmFreeSpecialPool(%p)\n", P);

    /* Get the PTE */
    PointerPte = MiAddressToPte(P);

    /* Check if it's valid */
    if (PointerPte->u.Hard.Valid == 0)
    {
        /* Bugcheck if it has NOACCESS or 0 set as protection */
        if (PointerPte->u.Soft.Protection == MM_NOACCESS ||
            !PointerPte->u.Soft.Protection)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         (ULONG_PTR)PointerPte,
                         0,
                         0x20);
        }
    }

    /* Determine whether it's an underrun or overrun pool pointer */
    PtrOffset = (ULONG)((ULONG_PTR)P & (PAGE_SIZE - 1));
    if (PtrOffset)
    {
        /* Pool catches overruns */
        Header = PAGE_ALIGN(P);
        Overruns = TRUE;
    }
    else
    {
        /* Pool catches underruns */
        Header = (PPOOL_HEADER)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - sizeof(POOL_HEADER));
    }

    /* Check if it's non-paged pool */
    if ((Header->Ulong1 & SPECIAL_POOL_PAGED) == 0)
    {
        /* Non-paged allocation, ensure that IRQL is not higher than DISPATCH_LEVEL */
        PoolType = NonPagedPool;
        ASSERT(PointerPte[1].u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         Irql,
                         PoolType,
                         (ULONG_PTR)P,
                         0x31);
        }
    }
    else
    {
        /* Paged allocation, ensure that IRQL is not higher than APC_LEVEL */
        PoolType = PagedPool;
        ASSERT(PointerPte[1].u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE);
        if (Irql > APC_LEVEL)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         Irql,
                         PoolType,
                         (ULONG_PTR)P,
                         0x31);
        }
    }

    /* Get the number of bytes the user requested, clearing out the paged flag */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;
    ASSERT(BytesRequested <= PAGE_SIZE - sizeof(POOL_HEADER));

    /* Check the memory before the allocated user buffer when detecting overruns */
    if (Overruns)
    {
        /* Calculate the real size of the buffer */
        BytesReal = PAGE_SIZE - PtrOffset;

        /* If they mismatch, it's unrecoverable */
        if (BytesRequested > BytesReal)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         BytesRequested,
                         BytesReal,
                         0x21);
        }

        if (BytesRequested + sizeof(POOL_HEADER) < BytesReal)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         BytesRequested,
                         BytesReal,
                         0x22);
        }

        /* Actually check the memory pattern */
        for (b = (PUCHAR)(Header + 1); b < (PUCHAR)P; b++)
        {
            if (*b != Header->BlockSize)
            {
                /* Bytes mismatch */
                KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                             (ULONG_PTR)P,
                             (ULONG_PTR)b,
                             Header->BlockSize,
                             0x23);
            }
        }
    }

    /* Check the memory pattern after the user buffer */
    MiSpecialPoolCheckPattern(P, Header);

    /* Fill the freed header */
    KeQueryTickCount(&TickCount);
    FreedHeader = (PMI_FREED_SPECIAL_POOL)PAGE_ALIGN(P);
    FreedHeader->Signature = 0x98764321;
    FreedHeader->TickCount = TickCount.LowPart;
    FreedHeader->NumberOfBytesRequested = BytesRequested;
    FreedHeader->Pagable = PoolType;
    FreedHeader->VirtualAddress = P;
    FreedHeader->Thread = PsGetCurrentThread();
    /* TODO: Fill StackPointer and StackBytes */
    FreedHeader->StackPointer = NULL;
    FreedHeader->StackBytes = 0;

    if (PoolType == NonPagedPool)
    {
        /* Not pageable. Get the PFN entry corresponding to the PTE */
        Pfn = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);

        /* Lock the PFN database */
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Delete this PFN */
        MI_SET_PFN_DELETED(Pfn);

        /* Decrement the share count of this PFN */
        MiDecrementShareCount(Pfn, PointerPte->u.Hard.PageFrameNumber);

        MI_ERASE_PTE(PointerPte);

        /* Flush the TLB */
        //FIXME: Use KeFlushSingleTb() instead
        KeFlushEntireTb(TRUE, TRUE);
    }
    else
    {
        /* Pageable. Delete that virtual address */
        MiDeleteSystemPageableVm(PointerPte, 1, 0, NULL);

        /* Lock the PFN database */
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }

    /* Mark the next PTE as invalid */
    MI_ERASE_PTE(PointerPte + 1);

    /* Make sure that the last entry is really the last one */
    ASSERT(MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);

    /* Update the current last PTE's next pointer */
    MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    /* PointerPte becomes the new last PTE */
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
    MiSpecialPoolLastPte = PointerPte;

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
}

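/*
 * Basic self-test for the special pool, normally disabled (see the
 * commented-out call at the end of MiInitializeSpecialPool). The
 * commented-out overrun/underrun blocks are expected to fault on a
 * guard page when enabled.
 */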
VOID
NTAPI
MiTestSpecialPool(VOID)
{
    ULONG i;
    PVOID p1, p2[100];
    //PUCHAR p3;
    ULONG ByteSize;
    POOL_TYPE PoolType = PagedPool;

    // First allocate/free
    for (i = 0; i < 100; i++)
    {
        ByteSize = (100 * (i + 1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p1 = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p1 %p size %lu\n", p1, ByteSize);
        MmFreeSpecialPool(p1);
    }

    // Now allocate all at once, then free all at once
    for (i = 0; i < 100; i++)
    {
        ByteSize = (100 * (i + 1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p2[i] = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p2[%lu] %p size %lu\n", i, p2[i], ByteSize);
    }
    for (i = 0; i < 100; i++)
    {
        DPRINT1("Freeing %p\n", p2[i]);
        MmFreeSpecialPool(p2[i]);
    }

    // Overrun the buffer to test
    //ByteSize = 16;
    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 0);
    //p3[ByteSize] = 0xF1; // This should cause an exception

    // Underrun the buffer to test
    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 1);
    //p3--;
    //*p3 = 0xF1; // This should cause an exception
}

/* EOF */