Implemented the 'red zone check' for the non-paged pool.
[reactos.git] / reactos / ntoskrnl / mm / npool.c
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 *
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 * Iwan Fatahi (i_fatahi@hotmail.com)
10 * Robert Bergkvist (fragdance@hotmail.com)
11 * Hartmut Birr
12 */
13
14 /* INCLUDES ****************************************************************/
15
16 #include <ntoskrnl.h>
17 #define NDEBUG
18 #include <internal/debug.h>
19
20 #if defined (ALLOC_PRAGMA)
21 #pragma alloc_text(INIT, MiInitializeNonPagedPool)
22 #endif
23
24 #ifdef ENABLE_VALIDATE_POOL
25 #define VALIDATE_POOL validate_kernel_pool()
26 #else
27 #define VALIDATE_POOL
28 #endif
29
30 #if 0
31 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
32 #else
33 #if defined(__GNUC__)
34 #define POOL_TRACE(args...)
35 #else
36 #define POOL_TRACE
37 #endif /* __GNUC__ */
38 #endif
39
/* Implemented elsewhere in Mm; prototype only. */
VOID MmPrintMemoryStatistic(VOID);

/* Red-zone debugging configuration for the non-paged pool. */
#define NPOOL_REDZONE_CHECK /* check the block at deallocation */
// #define NPOOL_REDZONE_CHECK_FULL /* check all blocks at each allocation/deallocation */
#define NPOOL_REDZONE_SIZE 8 /* number of red zone bytes */
/* NOTE(review): fill patterns for the guard bytes; the exact placement of
 * the "lo"/"hi" values is established in code outside this chunk — confirm. */
#define NPOOL_REDZONE_LOVALUE 0x87
#define NPOOL_REDZONE_HIVALUE 0xA5
47
48
49 /* avl types ****************************************************************/
50
51 /* FIXME:
52 * This declarations should be moved into a separate header file.
53 */
54
/*
 * Node of the AVL tree that indexes free pool blocks (ordering is defined
 * by the compare callback passed to avl_insert/avl_remove).
 */
typedef struct _NODE
{
   struct _NODE* link[2];  /* link[0] = left child, link[1] = right child */
   struct _NODE* parent;   /* NULL for the root node */
   signed char balance;    /* AVL balance factor; +/-2 only transiently while rebalancing */
}
NODE, *PNODE;
62
63 /* TYPES *******************************************************************/
64
65 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
66 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
67
68 /*
69 * fields present at the start of a block (this is for internal use only)
70 */
/*
 * fields present at the start of a block (this is for internal use only)
 */
typedef struct _HDR
{
   ULONG Magic;            /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
   ULONG Size;             /* total block size, header included (blk + Size = next block) */
   struct _HDR* previous;  /* header of the physically preceding block, or NULL */
} HDR, *PHDR;
77
/* Header of an allocated (in-use) block. */
typedef struct _HDR_USED
{
   HDR hdr;                 /* common header (magic, size, previous) */
   LIST_ENTRY ListEntry;    /* entry in UsedBlockListHead */
   ULONG Tag;               /* pool tag supplied by the caller; 0 = untagged */
   PVOID Caller;            /* presumably the allocator's caller address — set outside this chunk, confirm */
   LIST_ENTRY TagListEntry; /* entry in the per-tag statistics hash table */
#if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
   ULONG UserSize;          /* caller-requested size, used to locate the red zone */
#endif
   BOOLEAN Dumped;          /* TRUE once reported by a statistics dump (NewOnly filter) */
} HDR_USED, *PHDR_USED;
90
/* Header of a free block; lives in the size-ordered AVL tree. */
typedef struct _HDR_FREE
{
   HDR hdr;    /* common header (magic, size, previous) */
   NODE Node;  /* node in FreeBlockListRoot, ordered by compare_node */
} HDR_FREE, *PHDR_FREE;
96
#define HDR_FREE_SIZE ROUND_UP(sizeof(HDR_FREE), MM_POOL_ALIGNMENT)

/*
 * Size reserved in front of the user data of an allocated block.  When red
 * zone checking is enabled, NPOOL_REDZONE_SIZE guard bytes are reserved too.
 *
 * BUGFIX: the original condition read "|| #defined(NPOOL_REDZONE_CHECK_FULL)".
 * "#defined" is not valid preprocessor syntax -- "defined" is an operator
 * inside #if, not a directive -- so the line failed to preprocess.
 */
#if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
#define HDR_USED_SIZE ROUND_UP(sizeof(HDR_USED) + NPOOL_REDZONE_SIZE, MM_POOL_ALIGNMENT)
#else
#define HDR_USED_SIZE ROUND_UP(sizeof(HDR_USED), MM_POOL_ALIGNMENT)
#endif
104
105 /* GLOBALS *****************************************************************/
106
/* Pool region bounds, defined elsewhere in Mm. */
extern PVOID MiNonPagedPoolStart;
extern ULONG MiNonPagedPoolLength;

/*
 * Head of the list of free blocks
 */
static PNODE FreeBlockListRoot = NULL;

/*
 * Head of the list of in use block
 */
static LIST_ENTRY UsedBlockListHead;

/* NOTE(review): not referenced anywhere in this chunk — purpose unclear, confirm. */
static LIST_ENTRY AddressListHead;

/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;

/*
 * Lock that protects the non-paged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/* Total quota for Non Paged Pool */
ULONG MmTotalNonPagedPoolQuota = 0;

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */

/* One bit per pool page; a set bit means the page is committed (see grow_block). */
static PULONG MiNonPagedPoolAllocMap;
/* Number of pool pages currently backed by physical memory. */
static ULONG MiNonPagedPoolNrOfPages;
157
158 /* avl helper functions ****************************************************/
159
/*
 * Debug helper: print the free-block subtree rooted at p via an in-order
 * walk (ascending in the compare_node order).  The number in parentheses
 * is the recursion count: incremented on entry, decremented on exit, so at
 * the moment of printing it equals the node's depth in the tree (root = 1).
 */
void DumpFreeBlockNode(PNODE p)
{
   static int count = 0;   /* shared across the recursion; not thread-safe (debug only) */
   HDR_FREE* blk;

   count++;

   if (p)
   {
      DumpFreeBlockNode(p->link[0]);
      blk = CONTAINING_RECORD(p, HDR_FREE, Node);
      DbgPrint("%08x %8d (%d)\n", blk, blk->hdr.Size, count);
      DumpFreeBlockNode(p->link[1]);
   }

   count--;
}
177 void DumpFreeBlockTree(void)
178 {
179 DbgPrint("--- Begin tree ------------------\n");
180 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, HDR_FREE, Node));
181 DumpFreeBlockNode(FreeBlockListRoot);
182 DbgPrint("--- End tree --------------------\n");
183 }
184
185 int compare_node(PNODE p1, PNODE p2)
186 {
187 HDR_FREE* blk1 = CONTAINING_RECORD(p1, HDR_FREE, Node);
188 HDR_FREE* blk2 = CONTAINING_RECORD(p2, HDR_FREE, Node);
189
190 if (blk1->hdr.Size == blk2->hdr.Size)
191 {
192 if (blk1 < blk2)
193 {
194 return -1;
195 }
196 if (blk1 > blk2)
197 {
198 return 1;
199 }
200 }
201 else
202 {
203 if (blk1->hdr.Size < blk2->hdr.Size)
204 {
205 return -1;
206 }
207 if (blk1->hdr.Size > blk2->hdr.Size)
208 {
209 return 1;
210 }
211 }
212 return 0;
213
214 }
215
216 int compare_value(PVOID value, PNODE p)
217 {
218 HDR_FREE* blk = CONTAINING_RECORD(p, HDR_FREE, Node);
219 ULONG v = *(PULONG)value;
220
221 if (v < blk->hdr.Size)
222 {
223 return -1;
224 }
225 if (v > blk->hdr.Size)
226 {
227 return 1;
228 }
229 return 0;
230 }
231
232 /* avl functions **********************************************************/
233
234 /* FIXME:
235 * The avl functions should be moved into a separate file.
236 */
237
238 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
239
/*
 * Insert node n into the AVL tree at *root and restore the AVL invariant.
 * "compare" defines the total order of the nodes.  Based on libavl:
 * descend to the insertion point remembering y, the deepest node with a
 * non-zero balance factor (the only place a rotation can be needed), then
 * update balance factors on the path n..y and rotate at y if it reached
 * a balance of +/-2.
 */
void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
{
   PNODE y;     /* Top node to update balance factor, and parent. */
   PNODE p, q;  /* Iterator, and parent. */
   PNODE w;     /* New root of rebalanced subtree. */
   int dir = 0; /* Direction to descend. */

   n->link[0] = n->link[1] = n->parent = NULL;
   n->balance = 0;

   /* descend to the leaf position; y tracks the last unbalanced ancestor */
   y = *root;
   for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
   {
      dir = compare(n, p) > 0;
      if (p->balance != 0)
      {
         y = p;
      }
   }

   n->parent = q;
   if (q != NULL)
   {
      q->link[dir] = n;
   }
   else
   {
      *root = n;
   }

   /* the first node inserted into an empty tree needs no rebalancing */
   if (*root == n)
   {
      return;
   }

   /* adjust balance factors on the path from n up to (but excluding) y */
   for (p = n; p != y; p = q)
   {
      q = p->parent;
      dir = q->link[0] != p;
      if (dir == 0)
      {
         q->balance--;
      }
      else
      {
         q->balance++;
      }
   }

   if (y->balance == -2)
   {
      /* left-heavy: single right rotation (LL) or double rotation (LR) */
      PNODE x = y->link[0];
      if (x->balance == -1)
      {
         /* LL case: rotate right around y */
         w = x;
         y->link[0] = x->link[1];
         x->link[1] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
      else
      {
         /* LR case: rotate left around x, then right around y */
         ASSERT(x->balance == +1);
         w = x->link[1];
         x->link[1] = w->link[0];
         w->link[0] = x;
         y->link[0] = w->link[1];
         w->link[1] = y;
         if (w->balance == -1)
         {
            x->balance = 0;
            y->balance = +1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == +1| */
         {
            x->balance = -1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[1] != NULL)
         {
            x->link[1]->parent = x;
         }
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
   }
   else if (y->balance == +2)
   {
      /* right-heavy: mirror image of the case above (RR or RL) */
      PNODE x = y->link[1];
      if (x->balance == +1)
      {
         /* RR case: rotate left around y */
         w = x;
         y->link[1] = x->link[0];
         x->link[0] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
      else
      {
         /* RL case: rotate right around x, then left around y */
         ASSERT(x->balance == -1);
         w = x->link[0];
         x->link[0] = w->link[1];
         w->link[1] = x;
         y->link[1] = w->link[0];
         w->link[0] = y;
         if (w->balance == 1)
         {
            x->balance = 0;
            y->balance = -1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == -1| */
         {
            x->balance = +1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[0] != NULL)
         {
            x->link[0]->parent = x;
         }
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
   }
   else
   {
      /* tree still balanced at y; nothing to rotate */
      return;
   }
   /* reattach the rebalanced subtree to y's former parent (or the root) */
   if (w->parent != NULL)
   {
      w->parent->link[y != w->parent->link[0]] = w;
   }
   else
   {
      *root = w;
   }

   return;
}
406
/*
 * Remove "item" from the AVL tree at *root and restore the AVL invariant.
 * Based on libavl: unlink the node (replacing it by its in-order successor
 * when it has two children), then walk back toward the root adjusting
 * balance factors and rotating where a subtree reaches a balance of +/-2.
 * Note: *root itself is treated as a pseudo-node via the (PNODE)root cast,
 * so the loop terminates at the root sentinel.
 */
void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
{
   PNODE p; /* Traverses tree to find node to delete. */
   PNODE q; /* Parent of |p|. */
   int dir; /* Side of |q| on which |p| is linked. */

   if (root == NULL || *root == NULL)
   {
      return ;
   }

   p = item;
   q = p->parent;
   if (q == NULL)
   {
      /* item is the root: use the root pointer itself as pseudo-parent */
      q = (PNODE) root;
      dir = 0;
   }
   else
   {
      dir = compare(p, q) > 0;
   }

   if (p->link[1] == NULL)
   {
      /* no right child: splice the left child into p's place */
      q->link[dir] = p->link[0];
      if (q->link[dir] != NULL)
      {
         q->link[dir]->parent = p->parent;
      }
   }
   else
   {
      PNODE r = p->link[1];
      if (r->link[0] == NULL)
      {
         /* right child has no left subtree: it replaces p directly */
         r->link[0] = p->link[0];
         q->link[dir] = r;
         r->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         r->balance = p->balance;
         q = r;
         dir = 1;
      }
      else
      {
         /* general case: replace p with its in-order successor s */
         PNODE s = r->link[0];
         while (s->link[0] != NULL)
         {
            s = s->link[0];
         }
         r = s->parent;
         r->link[0] = s->link[1];
         s->link[0] = p->link[0];
         s->link[1] = p->link[1];
         q->link[dir] = s;
         if (s->link[0] != NULL)
         {
            s->link[0]->parent = s;
         }
         s->link[1]->parent = s;
         s->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         s->balance = p->balance;
         q = r;
         dir = 0;
      }
   }

   /* fully detach the removed node */
   item->link[0] = item->link[1] = item->parent = NULL;
   item->balance = 0;

   /* walk back up, fixing balance factors; stop when height is unchanged */
   while (q != (PNODE) root)
   {
      PNODE y = q;

      if (y->parent != NULL)
      {
         q = y->parent;
      }
      else
      {
         q = (PNODE) root;
      }

      if (dir == 0)
      {
         /* removal happened in y's left subtree */
         dir = q->link[0] != y;
         y->balance++;
         if (y->balance == +1)
         {
            break;
         }
         else if (y->balance == +2)
         {
            PNODE x = y->link[1];
            if (x->balance == -1)
            {
               /* RL case: double rotation */
               PNODE w;

               ASSERT(x->balance == -1);
               w = x->link[0];
               x->link[0] = w->link[1];
               w->link[1] = x;
               y->link[1] = w->link[0];
               w->link[0] = y;
               if (w->balance == +1)
               {
                  x->balance = 0;
                  y->balance = -1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == -1| */
               {
                  x->balance = +1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[0] != NULL)
               {
                  x->link[0]->parent = x;
               }
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* RR case: single left rotation */
               y->link[1] = x->link[0];
               x->link[0] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  /* subtree height unchanged: rebalancing stops here */
                  x->balance = -1;
                  y->balance = +1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
      else
      {
         /* removal happened in y's right subtree (mirror of the above) */
         dir = q->link[0] != y;
         y->balance--;
         if (y->balance == -1)
         {
            break;
         }
         else if (y->balance == -2)
         {
            PNODE x = y->link[0];
            if (x->balance == +1)
            {
               /* LR case: double rotation */
               PNODE w;
               ASSERT(x->balance == +1);
               w = x->link[1];
               x->link[1] = w->link[0];
               w->link[0] = x;
               y->link[0] = w->link[1];
               w->link[1] = y;
               if (w->balance == -1)
               {
                  x->balance = 0;
                  y->balance = +1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == +1| */
               {
                  x->balance = -1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[1] != NULL)
               {
                  x->link[1]->parent = x;
               }
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* LL case: single right rotation */
               y->link[0] = x->link[1];
               x->link[1] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  /* subtree height unchanged: rebalancing stops here */
                  x->balance = +1;
                  y->balance = -1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
   }

}
646
647 PNODE _cdecl avl_get_first(PNODE root)
648 {
649 PNODE p;
650 if (root == NULL)
651 {
652 return NULL;
653 }
654 p = root;
655 while (p->link[0])
656 {
657 p = p->link[0];
658 }
659 return p;
660 }
661
662 PNODE avl_get_next(PNODE root, PNODE p)
663 {
664 PNODE q;
665 if (p->link[1])
666 {
667 p = p->link[1];
668 while(p->link[0])
669 {
670 p = p->link[0];
671 }
672 return p;
673 }
674 else
675 {
676 q = p->parent;
677 while (q && q->link[1] == p)
678 {
679 p = q;
680 q = q->parent;
681 }
682 if (q == NULL)
683 {
684 return NULL;
685 }
686 else
687 {
688 return q;
689 }
690 }
691 }
692
/*
 * Best-fit lookup: return the node of the smallest free block whose size
 * is >= "size", or NULL if no block is large enough.  On an exact size
 * match, step to left children for as long as they have the same size,
 * so the first node of the equal-size run is returned.
 */
PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
{
   PNODE p;
   PNODE prev = NULL;  /* best strictly-greater candidate seen so far */
   int cmp;

   for (p = root; p != NULL;)
   {
      cmp = compare((PVOID)&size, p);
      if (cmp < 0)
      {
         /* p is big enough: remember it, then look for a tighter fit */
         prev = p;
         p = p->link[0];
      }
      else if (cmp > 0)
      {
         p = p->link[1];
      }
      else
      {
         /* exact match: walk left through nodes of equal size */
         while (p->link[0])
         {
            cmp = compare((PVOID)&size, p->link[0]);
            if (cmp != 0)
            {
               break;
            }
            p = p->link[0];
         }
         return p;
      }
   }
   return prev;
}
727
728 /* non paged pool functions ************************************************/
729
730 #ifdef TAG_STATISTICS_TRACKING
731 VOID
732 MiRemoveFromTagHashTable(HDR_USED* block)
733 /*
734 * Remove a block from the tag hash table
735 */
736 {
737 if (block->Tag == 0)
738 {
739 return;
740 }
741 RemoveEntryList(&block->TagListEntry);
742 }
743
744 VOID
745 MiAddToTagHashTable(HDR_USED* block)
746 /*
747 * Add a block to the tag hash table
748 */
749 {
750 ULONG hash;
751
752 if (block->Tag == 0)
753 {
754 return;
755 }
756
757 hash = block->Tag % TAG_HASH_TABLE_SIZE;
758
759 InsertHeadList(&tag_hash_table[hash], &block->TagListEntry);
760 }
761 #endif /* TAG_STATISTICS_TRACKING */
762
763 #if defined(TAG_STATISTICS_TRACKING)
764 VOID STATIC
765 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
766 {
767 CHAR c1, c2, c3, c4;
768
769 c1 = (CHAR)((CurrentTag >> 24) & 0xFF);
770 c2 = (CHAR)((CurrentTag >> 16) & 0xFF);
771 c3 = (CHAR)((CurrentTag >> 8) & 0xFF);
772 c4 = (CHAR)(CurrentTag & 0xFF);
773
774 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
775 {
776 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
777 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
778 CurrentSize, CurrentSize / CurrentNrBlocks);
779 }
780 else
781 {
782 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d ",
783 CurrentTag, CurrentNrBlocks, CurrentSize,
784 CurrentSize / CurrentNrBlocks);
785 KeRosPrintAddress((PVOID)CurrentTag);
786 DbgPrint("\n");
787 }
788 }
789 #endif /* defined(TAG_STATISTICS_TRACKING) */
790
/*
 * Dump per-tag allocation statistics for the non-paged pool.
 * For each hash bucket the blocks are grouped by tag: one tag's blocks are
 * moved onto a temporary list per pass, its stats printed, and the bucket
 * is restored afterwards.  With NewOnly set, blocks already reported
 * (Dumped == TRUE) are excluded from the counts.
 * NOTE(review): no pool lock is taken here — presumably the caller holds
 * it or runs at a safe time; confirm.
 */
VOID
NTAPI
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#if defined(TAG_STATISTICS_TRACKING)
   ULONG i;
   HDR_USED* current;
   ULONG CurrentTag;
   ULONG CurrentNrBlocks = 0;
   ULONG CurrentSize = 0;
   ULONG TotalBlocks;
   ULONG TotalSize;
   ULONG Size;
   LIST_ENTRY tmpListHead;
   PLIST_ENTRY current_entry;

   DbgPrint("******* Dumping non paging pool stats ******\n");
   TotalBlocks = 0;
   TotalSize = 0;
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tmpListHead);

      /* each pass extracts all blocks of one tag from the bucket */
      while (!IsListEmpty(&tag_hash_table[i]))
      {
         CurrentTag = 0;

         current_entry = tag_hash_table[i].Flink;
         while (current_entry != &tag_hash_table[i])
         {
            current = CONTAINING_RECORD(current_entry, HDR_USED, TagListEntry);
            current_entry = current_entry->Flink;
            if (CurrentTag == 0)
            {
               /* first block of this pass selects the tag to accumulate */
               CurrentTag = current->Tag;
               CurrentNrBlocks = 0;
               CurrentSize = 0;
            }
            if (current->Tag == CurrentTag)
            {
               /* park the block on the temporary list while counting it */
               RemoveEntryList(&current->TagListEntry);
               InsertHeadList(&tmpListHead, &current->TagListEntry);
               if (!NewOnly || !current->Dumped)
               {
                  CurrentNrBlocks++;
                  TotalBlocks++;
                  CurrentSize += current->hdr.Size;
                  TotalSize += current->hdr.Size;
                  current->Dumped = TRUE;
               }
            }
         }
         if (CurrentTag != 0 && CurrentNrBlocks != 0)
         {
            MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
         }
      }
      /* splice the temporary list back into the (now empty) bucket */
      if (!IsListEmpty(&tmpListHead))
      {
         tag_hash_table[i].Flink = tmpListHead.Flink;
         tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
         tag_hash_table[i].Blink = tmpListHead.Blink;
         tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
      }
   }
   if (TotalBlocks != 0)
   {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
   }
   else
   {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
   }
   /* exclude the still-uncommitted part of the pool from the free total */
   Size = EiFreeNonPagedPool - (MiNonPagedPoolLength - MiNonPagedPoolNrOfPages * PAGE_SIZE);
   DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
            EiNrFreeBlocks, Size, EiNrFreeBlocks ? Size / EiNrFreeBlocks : 0);
   DbgPrint("***************** Dump Complete ***************\n");
#endif /* defined(TAG_STATISTICS_TRACKING) */
}
872
/*
 * Dump every block on the used-block list (size, tag, allocator) under the
 * pool spin lock.  With NewOnly set, only blocks not yet reported are
 * printed; each printed block is marked Dumped.  Compiled to a no-op
 * unless POOL_DEBUG_APIS is defined.
 */
VOID
NTAPI
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
#if defined(POOL_DEBUG_APIS)
   HDR_USED* current;
   PLIST_ENTRY current_entry;
   KIRQL oldIrql;

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   DbgPrint("******* Dumping non paging pool contents ******\n");
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
      if (!NewOnly || !current->Dumped)
      {
         CHAR c1, c2, c3, c4;

         /* split the tag into bytes; printed low byte first if printable */
         c1 = (CHAR)((current->Tag >> 24) & 0xFF);
         c2 = (CHAR)((current->Tag >> 16) & 0xFF);
         c3 = (CHAR)((current->Tag >> 8) & 0xFF);
         c4 = (CHAR)(current->Tag & 0xFF);

         if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
         {
            DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                     current->hdr.Size, current->Tag, c4, c3, c2, c1,
                     current->Caller);
         }
         else
         {
            DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                     current->hdr.Size, current->Tag, current->Caller);
         }
         current->Dumped = TRUE;
      }
      current_entry = current_entry->Flink;
   }
   DbgPrint("***************** Dump Complete ***************\n");
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#endif
}
917
918 #ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 * Bug-checks on a bad magic, a block outside the pool region, or more
 * blocks in the tree than EiNrFreeBlocks claims.
 */
{
   HDR_FREE* current;
   unsigned int blocks_seen=0;
   PNODE p;

   p = avl_get_first(FreeBlockListRoot);

   while(p)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(p, HDR_FREE, Node);
      base_addr = (PVOID)current;

      if (current->hdr.Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* the whole block (header + data) must lie inside the pool region */
      if (base_addr < MiNonPagedPoolStart ||
            (ULONG_PTR)base_addr + current->hdr.Size > (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->hdr.Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  (ULONG_PTR)MiNonPagedPoolStart+MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      /* more nodes than the counter says means the tree is corrupt */
      if (blocks_seen > EiNrFreeBlocks)
      {
         DbgPrint("Too many blocks on free list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }
}
962
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * Bug-checks on a bad magic, a block outside the pool region, a count
 * mismatch, or broken Flink/Blink cross-links.
 */
{
   HDR_USED* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
      base_addr = (PVOID)current;

      if (current->hdr.Magic != BLOCK_HDR_USED_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* the whole block (header + data) must lie inside the pool region */
      if (base_addr < MiNonPagedPoolStart ||
            ((ULONG_PTR)base_addr+current->hdr.Size) >
            (ULONG_PTR)MiNonPagedPoolStart+MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->hdr.Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  (ULONG_PTR)MiNonPagedPoolStart+MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
      {
         DbgPrint("Too many blocks on used list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* the next entry's back pointer must point back at us */
      if (current->ListEntry.Flink != &UsedBlockListHead &&
            current->ListEntry.Flink->Blink != &current->ListEntry)
      {
         DbgPrint("%s:%d:Break in list (current %x next %x "
                  "current->next->previous %x)\n",
                  __FILE__,__LINE__,current, current->ListEntry.Flink,
                  current->ListEntry.Flink->Blink);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }
}
1015
static void check_duplicates(HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *        blk = block to check
 * NOTE: Bug checks if duplicates are found
 * Scans both the free tree and the used list for any block whose extent
 * overlaps [blk, blk + blk->Size).
 */
{
   ULONG_PTR base = (ULONG_PTR)blk;
   ULONG_PTR last = (ULONG_PTR)blk + blk->Size;
   PLIST_ENTRY current_entry;
   PNODE p;
   HDR_FREE* free;
   HDR_USED* used;

   p = avl_get_first(FreeBlockListRoot);

   while (p)
   {
      free = CONTAINING_RECORD(p, HDR_FREE, Node);
      if (free->hdr.Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  free);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* another block starting inside our extent */
      if ( (ULONG_PTR)free > base && (ULONG_PTR)free < last )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* another block extending into our extent */
      if ( (ULONG_PTR)free < base &&
            ((ULONG_PTR)free + free->hdr.Size) > base )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      used = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);

      if ( (ULONG_PTR)used > base && (ULONG_PTR)used < last )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if ( (ULONG_PTR)used < base &&
            ((ULONG_PTR)used + used->hdr.Size) > base )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }

}
1078
1079 static void validate_kernel_pool(void)
1080 /*
1081 * FUNCTION: Checks the integrity of the kernel memory heap
1082 */
1083 {
1084 HDR_FREE* free;
1085 HDR_USED* used;
1086 PLIST_ENTRY current_entry;
1087 PNODE p;
1088
1089 validate_free_list();
1090 validate_used_list();
1091
1092 p = avl_get_first(FreeBlockListRoot);
1093 while (p)
1094 {
1095 free = CONTAINING_RECORD(p, HDR_FREE, Node);
1096 check_duplicates(&free->hdr);
1097 p = avl_get_next(FreeBlockListRoot, p);
1098 }
1099 current_entry = UsedBlockListHead.Flink;
1100 while (current_entry != &UsedBlockListHead)
1101 {
1102 used = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
1103 check_duplicates(&used->hdr);
1104 current_entry = current_entry->Flink;
1105 }
1106 }
1107 #endif
1108
1109 #if 0
/*
 * Dead code (the enclosing "#if 0" disables it): intended to release the
 * whole pages covered by a free block.  Only the early-out for blocks
 * smaller than a page was ever written; the actual freeing is missing.
 */
STATIC VOID
free_pages(HDR_FREE* blk)
{
   ULONG start;
   ULONG end;

   start = (ULONG_PTR)blk;
   end = (ULONG_PTR)blk + blk->hdr.Size;

   /*
    * If the block doesn't contain a whole page then there is nothing to do
    */
   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
   {
      return;
   }
}
1127 #endif
1128
1129 static void remove_from_used_list(HDR_USED* current)
1130 {
1131 RemoveEntryList(&current->ListEntry);
1132 EiUsedNonPagedPool -= current->hdr.Size;
1133 EiNrUsedBlocks--;
1134 }
1135
1136 static void remove_from_free_list(HDR_FREE* current)
1137 {
1138 DPRINT("remove_from_free_list %d\n", current->hdr.Size);
1139
1140 avl_remove(&FreeBlockListRoot, &current->Node, compare_node);
1141
1142 EiFreeNonPagedPool -= current->hdr.Size;
1143 EiNrFreeBlocks--;
1144 DPRINT("remove_from_free_list done\n");
1145 #ifdef DUMP_AVL
1146
1147 DumpFreeBlockTree();
1148 #endif
1149 }
1150
static void
add_to_free_list(HDR_FREE* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 * Coalesces the block with its free physical neighbours (previous and
 * next) before inserting it into the AVL tree, and fixes the successor's
 * "previous" pointer when the block boundary moved.
 */
{
   HDR_FREE* current;
   BOOL UpdatePrevPtr = FALSE;

   DPRINT("add_to_free_list (%d)\n", blk->hdr.Size);

   EiNrFreeBlocks++;

   /* merge with the physically preceding block if it is free */
   current = (HDR_FREE*)blk->hdr.previous;
   if (current && current->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      current->hdr.Size = current->hdr.Size + blk->hdr.Size;
      /* NOTE(review): temporarily marked USED; set back to FREE below
       * before the tree insert — presumably to keep intermediate state
       * from looking like a valid free block. Confirm. */
      current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
      /* poison the absorbed header */
      memset(blk, 0xcc, HDR_USED_SIZE);
      blk = current;
      UpdatePrevPtr = TRUE;
   }

   /* merge with the physically following block if it is free */
   current = (HDR_FREE*)((ULONG_PTR)blk + blk->hdr.Size);
   if ((char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength &&
         current->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      blk->hdr.Size += current->hdr.Size;
      /* poison the absorbed header */
      memset(current, 0xcc, HDR_FREE_SIZE);
      UpdatePrevPtr = TRUE;
      current = (HDR_FREE*)((ULONG_PTR)blk + blk->hdr.Size);
   }
   /* a merge moved our end/start: repoint the successor's back link */
   if (UpdatePrevPtr &&
         (char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
   {
      current->hdr.previous = &blk->hdr;
   }
   DPRINT("%d\n", blk->hdr.Size);
   blk->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   EiFreeNonPagedPool += blk->hdr.Size;
   avl_insert(&FreeBlockListRoot, &blk->Node, compare_node);
   DPRINT("add_to_free_list done\n");
#ifdef DUMP_AVL

   DumpFreeBlockTree();
#endif
}
1200
1201 static void add_to_used_list(HDR_USED* blk)
1202 /*
1203 * FUNCTION: add the block to the used list (internal)
1204 */
1205 {
1206 InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
1207 EiUsedNonPagedPool += blk->hdr.Size;
1208 EiNrUsedBlocks++;
1209 }
1210
1211
/*
 * Commit physical pages for every not-yet-mapped pool page between blk's
 * header and "end".  Scans the allocation bitmap for runs of unmapped
 * pages (at most 32 at a time), allocates and maps them, and marks them
 * in the bitmap.  Returns FALSE (releasing any pages of the current run)
 * if allocation or mapping fails; pages committed by earlier runs stay
 * mapped.
 */
static BOOLEAN
grow_block(HDR_FREE* blk, PVOID end)
{
   NTSTATUS Status;
   PFN_TYPE Page[32];
   ULONG_PTR StartIndex, EndIndex;
   ULONG i, j, k;

   /* first whole page past the (already accessible) free header */
   StartIndex = (ULONG_PTR)(PAGE_ROUND_UP((ULONG_PTR)blk + HDR_FREE_SIZE - (ULONG_PTR)MiNonPagedPoolStart)) / PAGE_SIZE;
   EndIndex = ((ULONG_PTR)PAGE_ROUND_UP(end) - (ULONG_PTR)MiNonPagedPoolStart) / PAGE_SIZE;


   for (i = StartIndex; i < EndIndex; i++)
   {
      if (!(MiNonPagedPoolAllocMap[i / 32] & (1 << (i % 32))))
      {
         /* find the extent of this unmapped run, capped at 32 pages */
         for (j = i + 1; j < EndIndex && j - i < 32; j++)
         {
            if (MiNonPagedPoolAllocMap[j / 32] & (1 << (j % 32)))
            {
               break;
            }
         }
         /* allocate one physical page per page of the run */
         for (k = 0; k < j - i; k++)
         {
            Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page[k]);
            if (!NT_SUCCESS(Status))
            {
               /* release the pages of this run; we return immediately,
                * so reusing "i" as the loop variable is harmless */
               for (i = 0; i < k; i++)
               {
                  MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
               }
               return FALSE;
            }
         }
         /* map the whole run in one call (k == j - i here) */
         Status = MmCreateVirtualMapping(NULL,
                                         (PVOID)((ULONG_PTR)MiNonPagedPoolStart + i * PAGE_SIZE),
                                         PAGE_READWRITE|PAGE_SYSTEM,
                                         Page,
                                         k);
         if (!NT_SUCCESS(Status))
         {
            for (i = 0; i < k; i++)
            {
               MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
            }
            return FALSE;
         }
         /* mark the run as committed in the bitmap */
         for (j = i; j < k + i; j++)
         {
            MiNonPagedPoolAllocMap[j / 32] |= (1 << (j % 32));
         }
         MiNonPagedPoolNrOfPages += k;
         i += k - 1;
      }
   }
   return TRUE;
}
1270
1271 static HDR_USED* get_block(unsigned int size, unsigned long alignment)
1272 {
1273 HDR_FREE *blk, *current, *previous = NULL, *next = NULL, *best = NULL;
1274 ULONG previous_size = 0, current_size, next_size = 0, new_size;
1275 PVOID end;
1276 PVOID addr, aligned_addr, best_aligned_addr=NULL;
1277 PNODE p;
1278
1279 DPRINT("get_block %d\n", size);
1280
1281 p = avl_find_equal_or_greater(FreeBlockListRoot, size + HDR_USED_SIZE, compare_value);
1282 while (p)
1283 {
1284 current = CONTAINING_RECORD(p, HDR_FREE, Node);
1285 addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
1286 /* calculate first aligned address available within this block */
1287 aligned_addr = alignment > 0 ? MM_ROUND_UP(addr, alignment) : addr;
1288 if (size < PAGE_SIZE)
1289 {
1290 /* check that the block is in one page */
1291 if (PAGE_ROUND_DOWN(aligned_addr) != PAGE_ROUND_DOWN((ULONG_PTR)aligned_addr + size - 1))
1292 {
1293 aligned_addr = (PVOID)PAGE_ROUND_UP(aligned_addr);
1294 }
1295 }
1296 DPRINT("%x %x\n", addr, aligned_addr);
1297 if (aligned_addr != addr)
1298 {
1299 while((ULONG_PTR)aligned_addr - (ULONG_PTR)addr < HDR_FREE_SIZE)
1300 {
1301 if (alignment == 0)
1302 {
1303 aligned_addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE + HDR_FREE_SIZE);
1304 }
1305 else
1306 {
1307 aligned_addr = MM_ROUND_UP((PVOID)((ULONG_PTR)current + HDR_USED_SIZE + HDR_FREE_SIZE), alignment);
1308 }
1309 if (size < PAGE_SIZE)
1310 {
1311 /* check that the block is in one page */
1312 if (PAGE_ROUND_DOWN(aligned_addr) != PAGE_ROUND_DOWN((ULONG_PTR)aligned_addr + size - 1))
1313 {
1314 aligned_addr = (PVOID)PAGE_ROUND_UP(aligned_addr);
1315 }
1316 }
1317 }
1318 }
1319 DPRINT("%x %x\n", addr, aligned_addr);
1320 new_size = (ULONG_PTR)aligned_addr - (ULONG_PTR)addr + size;
1321 if (current->hdr.Size >= new_size + HDR_USED_SIZE &&
1322 (best == NULL || current->hdr.Size < best->hdr.Size))
1323 {
1324 best = current;
1325 best_aligned_addr = aligned_addr;
1326 if (new_size <= size + 2 * HDR_FREE_SIZE)
1327 {
1328 break;
1329 }
1330 }
1331
1332 if (best)
1333 {
1334 if (size < PAGE_SIZE)
1335 {
1336 if (current->hdr.Size >= 2 * PAGE_SIZE + HDR_FREE_SIZE)
1337 {
1338 break;
1339 }
1340 }
1341 else
1342 {
1343 if (current->hdr.Size >= size + alignment + HDR_FREE_SIZE)
1344 {
1345 break;
1346 }
1347 }
1348 }
1349 p = avl_get_next(FreeBlockListRoot, p);
1350 }
1351 /*
1352 * We didn't find anything suitable at all.
1353 */
1354 if (best == NULL)
1355 {
1356 return NULL;
1357 }
1358
1359 DPRINT(":: blk %x blk->hdr.Size %d (%d) Size %d\n", best, best->hdr.Size, best->hdr.Size - HDR_USED_SIZE, size);
1360
1361 current = best;
1362 current_size = current->hdr.Size - HDR_USED_SIZE;
1363 addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
1364 if (addr != best_aligned_addr)
1365 {
1366 blk = (HDR_FREE*)((ULONG_PTR)best_aligned_addr - HDR_USED_SIZE);
1367 /*
1368 * if size-aligned, break off the preceding bytes into their own block...
1369 */
1370 previous = current;
1371 previous_size = (ULONG_PTR)blk - (ULONG_PTR)previous - HDR_FREE_SIZE;
1372 current = blk;
1373 current_size -= ((ULONG_PTR)current - (ULONG_PTR)previous);
1374 }
1375 end = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE + size);
1376
1377 if (current_size >= size + HDR_FREE_SIZE + MM_POOL_ALIGNMENT)
1378 {
1379 /* create a new free block after our block, if the memory size is >= 4 byte for this block */
1380 next = (HDR_FREE*)((ULONG_PTR)current + size + HDR_USED_SIZE);
1381 next_size = current_size - size - HDR_FREE_SIZE;
1382 current_size = size;
1383 end = (PVOID)((ULONG_PTR)next + HDR_FREE_SIZE);
1384 }
1385
1386 if (previous)
1387 {
1388 remove_from_free_list(previous);
1389 if (!grow_block(previous, end))
1390 {
1391 add_to_free_list(previous);
1392 return NULL;
1393 }
1394 memset(current, 0, HDR_USED_SIZE);
1395 current->hdr.Size = current_size + HDR_USED_SIZE;
1396 current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
1397 current->hdr.previous = &previous->hdr;
1398 previous->hdr.Size = previous_size + HDR_FREE_SIZE;
1399 if (next == NULL)
1400 {
1401 blk = (HDR_FREE*)((ULONG_PTR)current + current->hdr.Size);
1402 if ((ULONG_PTR)blk < (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
1403 {
1404 blk->hdr.previous = &current->hdr;
1405 }
1406 }
1407
1408 add_to_free_list(previous);
1409 }
1410 else
1411 {
1412 remove_from_free_list(current);
1413
1414 if (!grow_block(current, end))
1415 {
1416 add_to_free_list(current);
1417 return NULL;
1418 }
1419 current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
1420 if (next)
1421 {
1422 current->hdr.Size = current_size + HDR_USED_SIZE;
1423 }
1424 }
1425
1426 if (next)
1427 {
1428 memset(next, 0, HDR_FREE_SIZE);
1429 next->hdr.Size = next_size + HDR_FREE_SIZE;
1430 next->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
1431 next->hdr.previous = &current->hdr;
1432 blk = (HDR_FREE*)((ULONG_PTR)next + next->hdr.Size);
1433 if ((ULONG_PTR)blk < (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
1434 {
1435 blk->hdr.previous = &next->hdr;
1436 }
1437 add_to_free_list(next);
1438 }
1439
1440 add_to_used_list((HDR_USED*)current);
1441 VALIDATE_POOL;
1442
1443 if (size < PAGE_SIZE)
1444 {
1445 addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
1446 if (PAGE_ROUND_DOWN(addr) != PAGE_ROUND_DOWN((PVOID)((ULONG_PTR)addr + size - 1)))
1447 {
1448 DPRINT1("%x %x\n", addr, (ULONG_PTR)addr + size);
1449 }
1450 ASSERT (PAGE_ROUND_DOWN(addr) == PAGE_ROUND_DOWN((PVOID)((ULONG_PTR)addr + size - 1)));
1451 }
1452 if (alignment)
1453 {
1454 addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
1455 ASSERT(MM_ROUND_UP(addr, alignment) == addr);
1456 }
1457 return (HDR_USED*)current;
1458 }
1459
1460 ULONG STDCALL
1461 ExRosQueryNonPagedPoolTag ( PVOID Addr )
1462 {
1463 HDR_USED* blk=(HDR_USED*)((ULONG_PTR)Addr - HDR_USED_SIZE);
1464 if (blk->hdr.Magic != BLOCK_HDR_USED_MAGIC)
1465 KEBUGCHECK(0);
1466
1467 return blk->Tag;
1468 }
1469
#if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
/*
 * FUNCTION: Verifies the red-zone guard bytes surrounding an in-use block.
 * ARGUMENTS:
 *        hdr = header of the used block to check
 * NOTE: The low zone (NPOOL_REDZONE_SIZE bytes filled with
 *       NPOOL_REDZONE_LOVALUE) precedes the user area; the high zone
 *       (filled with NPOOL_REDZONE_HIVALUE) follows it at UserSize.
 *       On any corruption the block's tag/caller are printed and the
 *       system bugchecks.
 */
void check_redzone_header(HDR_USED* hdr)
{
   PBYTE LoZone = (PBYTE)((ULONG_PTR)hdr + HDR_USED_SIZE - NPOOL_REDZONE_SIZE);
   PBYTE HiZone = (PBYTE)((ULONG_PTR)hdr + HDR_USED_SIZE + hdr->UserSize);
   BOOL LoOK = TRUE;
   BOOL HiOK = TRUE;
   ULONG i;
   CHAR c[5];

   /* Scan both guard areas; remember which side (if any) was clobbered. */
   for (i = 0; i < NPOOL_REDZONE_SIZE; i++)
   {
      if (LoZone[i] != NPOOL_REDZONE_LOVALUE)
      {
         LoOK = FALSE;
      }
      if (HiZone[i] != NPOOL_REDZONE_HIVALUE)
      {
         HiOK = FALSE;
      }
   }

   if (!HiOK || !LoOK)
   {
      /* Decode the pool tag into a printable 4-character string. */
      c[0] = (CHAR)((hdr->Tag >> 24) & 0xFF);
      c[1] = (CHAR)((hdr->Tag >> 16) & 0xFF);
      c[2] = (CHAR)((hdr->Tag >> 8) & 0xFF);
      c[3] = (CHAR)(hdr->Tag & 0xFF);
      c[4] = 0;

      /* Suppress the string form entirely if any character is not
       * printable. Cast through unsigned char: isprint() has undefined
       * behavior for negative values of a (possibly signed) CHAR. */
      if (!isprint((unsigned char)c[0]) || !isprint((unsigned char)c[1]) ||
          !isprint((unsigned char)c[2]) || !isprint((unsigned char)c[3]))
      {
         c[0] = 0;
      }

      if (!LoOK)
      {
         DbgPrint("NPOOL: Low-side redzone overwritten, Block %x, Size %d, Tag %x(%s), Caller %x\n",
                  (ULONG_PTR)hdr + HDR_USED_SIZE, hdr->UserSize, hdr->Tag, c, hdr->Caller);
      }
      if (!HiOK)
      {
         /* Fixed misspelled "NPPOL:" prefix so both messages are greppable
          * under the same "NPOOL:" tag. */
         DbgPrint("NPOOL: High-side redzone overwritten, Block %x, Size %d, Tag %x(%s), Caller %x\n",
                  (ULONG_PTR)hdr + HDR_USED_SIZE, hdr->UserSize, hdr->Tag, c, hdr->Caller);
      }
      KEBUGCHECK(0);
   }
}
#endif
1519
#ifdef NPOOL_REDZONE_CHECK_FULL
/*
 * FUNCTION: Walks the whole used-block list and verifies the red zones
 *           of every live allocation (full-check mode only).
 */
void check_redzone_list(void)
{
   PLIST_ENTRY entry;

   for (entry = UsedBlockListHead.Flink;
        entry != &UsedBlockListHead;
        entry = entry->Flink)
   {
      check_redzone_header(CONTAINING_RECORD(entry, HDR_USED, ListEntry));
   }
}
#endif
1533
1534
1535 VOID STDCALL ExFreeNonPagedPool (PVOID block)
1536 /*
1537 * FUNCTION: Releases previously allocated memory
1538 * ARGUMENTS:
1539 * block = block to free
1540 */
1541 {
1542 HDR_USED* blk=(HDR_USED*)((ULONG_PTR)block - HDR_USED_SIZE);
1543 KIRQL oldIrql;
1544
1545 if (block == NULL)
1546 {
1547 return;
1548 }
1549
1550 DPRINT("freeing block %x\n",blk);
1551
1552 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->hdr.Size,
1553 blk->Caller);
1554 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1555
1556 VALIDATE_POOL;
1557 if (blk->hdr.Magic != BLOCK_HDR_USED_MAGIC)
1558 {
1559 if (blk->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
1560 {
1561 DbgPrint("ExFreePool of already freed address %x\n", block);
1562 }
1563 else
1564 {
1565 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1566 block, blk->hdr.Magic);
1567 }
1568 KEBUGCHECK(0);
1569 return;
1570 }
1571
1572 #if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
1573 check_redzone_header(blk);
1574 #endif
1575
1576 #ifdef NPOOL_REDZONE_CHECK_FULL
1577 check_redzone_list();
1578 #endif
1579
1580 memset(block, 0xcc, blk->hdr.Size - HDR_USED_SIZE);
1581
1582 #ifdef TAG_STATISTICS_TRACKING
1583 MiRemoveFromTagHashTable(blk);
1584 #endif
1585
1586 remove_from_used_list(blk);
1587 blk->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
1588 add_to_free_list((HDR_FREE*)blk);
1589 VALIDATE_POOL;
1590 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1591 }
1592
PVOID STDCALL
ExAllocateNonPagedPoolWithTag(POOL_TYPE Type, ULONG Size, ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocates a tagged block from the non-paged pool.
 * ARGUMENTS:
 *        Type   = pool type; CACHE_ALIGNED_POOL_MASK requests cache-line
 *                 alignment for sub-page allocations
 *        Size   = number of bytes requested by the caller
 *        Tag    = four-character pool tag stored in the block header
 *        Caller = return address of the caller, kept for diagnostics
 * RETURNS: Pointer to the user area of the new block, or NULL if Size is 0
 *          or no suitable free block could be found/grown.
 */
{
#if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
   ULONG UserSize;
#endif
   PVOID block;
   HDR_USED* best = NULL;
   KIRQL oldIrql;
   ULONG alignment;

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size,Caller);

   /* All pool bookkeeping below runs under the pool spinlock. */
   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

#ifdef NPOOL_REDZONE_CHECK_FULL
   /* Full red-zone mode: verify every live block on each allocation. */
   check_redzone_list();
#endif

   VALIDATE_POOL;

#if 1
   /* after some allocations print the npaged pool stats */
#ifdef TAG_STATISTICS_TRACKING

   {
      static ULONG counter = 0;
      if (counter++ % 100000 == 0)
      {
         MiDebugDumpNonPagedPoolStats(FALSE);
         MmPrintMemoryStatistic();
      }
   }
#endif
#endif
   /*
   * accomodate this useful idiom
   */
   if (Size == 0)
   {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
   }
   /* Make the size dword alligned, this makes the block dword alligned */
#if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
   /* Remember the caller's exact size and reserve room for the high-side
    * red zone; the low-side zone lives inside the (enlarged) header. */
   UserSize = Size;
   Size = ROUND_UP(Size + NPOOL_REDZONE_SIZE, MM_POOL_ALIGNMENT);
#else
   Size = ROUND_UP(Size, MM_POOL_ALIGNMENT);
#endif

   /* Choose the block alignment: page-aligned for page-or-larger requests,
    * cache-line for cache-aligned pool, otherwise no extra alignment. */
   if (Size >= PAGE_SIZE)
   {
      alignment = PAGE_SIZE;
   }
   else if (Type & CACHE_ALIGNED_POOL_MASK)
   {
      alignment = MM_CACHE_LINE_SIZE;
   }
   else
   {
      alignment = 0;
   }

   best = get_block(Size, alignment);
   if (best == NULL)
   {
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
              Size );
      KeRosDumpStackFrames(NULL, 10);
      return NULL;
   }
   /* Fill in the accounting fields of the freshly carved used block. */
   best->Tag = Tag;
   best->Caller = Caller;
   best->Dumped = FALSE;
   best->TagListEntry.Flink = best->TagListEntry.Blink = NULL;
#if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
   /* Paint the guard bytes on both sides of the user area; they are
    * verified again in check_redzone_header() at free time. */
   best->UserSize = UserSize;
   memset((PVOID)((ULONG_PTR)best + HDR_USED_SIZE - NPOOL_REDZONE_SIZE), NPOOL_REDZONE_LOVALUE, NPOOL_REDZONE_SIZE);
   memset((PVOID)((ULONG_PTR)best + HDR_USED_SIZE + UserSize), NPOOL_REDZONE_HIVALUE, NPOOL_REDZONE_SIZE);
#endif

#ifdef TAG_STATISTICS_TRACKING

   MiAddToTagHashTable(best);
#endif

   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   /* The user area starts right after the block header. */
   block = (PVOID)((ULONG_PTR)best + HDR_USED_SIZE);
   /* RtlZeroMemory(block, Size);*/
   return(block);
}
1688
VOID
INIT_FUNCTION
NTAPI
MiInitializeNonPagedPool(VOID)
/*
 * FUNCTION: One-time initialization of the non-paged pool. Maps the
 *           initial pages, sets up the allocation bitmap, and seeds the
 *           pool with three blocks: a small free block, a used block
 *           holding the bitmap itself, and the large trailing free block
 *           covering the rest of the pool address range.
 */
{
   NTSTATUS Status;
   PFN_TYPE Page;
   ULONG i;
   PVOID Address;
   HDR_USED* used;
   HDR_FREE* free;
#ifdef TAG_STATISTICS_TRACKING

   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tag_hash_table[i]);
   }
#endif
   KeInitializeSpinLock(&MmNpoolLock);
   InitializeListHead(&UsedBlockListHead);
   InitializeListHead(&AddressListHead);
   FreeBlockListRoot = NULL;

   /* The allocation bitmap lives one page into the pool region. */
   MiNonPagedPoolAllocMap = (PVOID)((ULONG_PTR)MiNonPagedPoolStart + PAGE_SIZE);
#if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
   /* Bitmap size in bytes: one bit per pool page, rounded to 32 pages,
    * then padded for the high-side red zone and the block header, plus
    * one leading page for the first free block. */
   MiNonPagedPoolNrOfPages = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8;
   MiNonPagedPoolNrOfPages = ROUND_UP(MiNonPagedPoolNrOfPages + NPOOL_REDZONE_SIZE, MM_POOL_ALIGNMENT);
   MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(MiNonPagedPoolNrOfPages + HDR_FREE_SIZE) + PAGE_SIZE;
#else
   MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + HDR_FREE_SIZE) + PAGE_SIZE;
#endif
   /* Convert the byte count computed above into a page count. */
   MiNonPagedPoolNrOfPages /= PAGE_SIZE;
   Address = MiNonPagedPoolStart;

   /* Back the initial pages with physical memory. */
   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to allocate a page\n");
         KEBUGCHECK(0);
      }

      Status = MmCreateVirtualMapping(NULL,
                                      Address,
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      &Page,
                                      1);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
      Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
   }

   /* Mark the pages mapped above as in-use in the allocation bitmap. */
   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      MiNonPagedPoolAllocMap[i / 32] |= (1 << (i % 32));
   }

   /* the first block is free */
   free = (HDR_FREE*)MiNonPagedPoolStart;
   free->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   free->hdr.Size = PAGE_SIZE - HDR_USED_SIZE;
   free->hdr.previous = NULL;
   /* Poison the free payload so stale reads are detectable. */
   memset((PVOID)((ULONG_PTR)free + HDR_FREE_SIZE), 0xcc, free->hdr.Size - HDR_FREE_SIZE);
   add_to_free_list(free);

   /* the second block contains the non paged pool bitmap */
   used = (HDR_USED*)((ULONG_PTR)free + free->hdr.Size);
   used->hdr.Magic = BLOCK_HDR_USED_MAGIC;
#if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
   /* The bitmap block gets red zones like any regular allocation. */
   used->UserSize = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8;
   used->hdr.Size = ROUND_UP(used->UserSize + NPOOL_REDZONE_SIZE, MM_POOL_ALIGNMENT) + HDR_USED_SIZE;
   memset((PVOID)((ULONG_PTR)used + HDR_USED_SIZE - NPOOL_REDZONE_SIZE), NPOOL_REDZONE_LOVALUE, NPOOL_REDZONE_SIZE);
   memset((PVOID)((ULONG_PTR)used + HDR_USED_SIZE + used->UserSize), NPOOL_REDZONE_HIVALUE, NPOOL_REDZONE_SIZE);
#else
   used->hdr.Size = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + HDR_USED_SIZE;
#endif
   used->hdr.previous = &free->hdr;
   used->Tag = 0xffffffff;
   used->Caller = (PVOID)MiInitializeNonPagedPool;
   used->Dumped = FALSE;
   add_to_used_list(used);
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(used);
#endif

   /* the third block is the free block after the bitmap */
   free = (HDR_FREE*)((ULONG_PTR)used + used->hdr.Size);
   free->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   free->hdr.Size = MiNonPagedPoolLength - ((ULONG_PTR)free - (ULONG_PTR)MiNonPagedPoolStart);
   free->hdr.previous = &used->hdr;
   /* Only the currently mapped part (up to Address) can be poisoned. */
   memset((PVOID)((ULONG_PTR)free + HDR_FREE_SIZE), 0xcc, (ULONG_PTR)Address - (ULONG_PTR)free - HDR_FREE_SIZE);
   add_to_free_list(free);
}
1786
1787 PVOID
1788 STDCALL
1789 MiAllocateSpecialPool (IN POOL_TYPE PoolType,
1790 IN SIZE_T NumberOfBytes,
1791 IN ULONG Tag,
1792 IN ULONG Underrun
1793 )
1794 {
1795 /* FIXME: Special Pools not Supported */
1796 DbgPrint("Special Pools not supported\n");
1797 return NULL;
1798 }
1799
1800 /* EOF */