3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 * Iwan Fatahi (i_fatahi@hotmail.com)
10 * Robert Bergkvist (fragdance@hotmail.com)
14 /* INCLUDES ****************************************************************/
18 #include <internal/debug.h>
20 #if defined (ALLOC_PRAGMA)
21 #pragma alloc_text(INIT, MiInitializeNonPagedPool)
24 #ifdef ENABLE_VALIDATE_POOL
25 #define VALIDATE_POOL validate_kernel_pool()
31 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0) /* no trailing ';' — caller supplies it, keeping 'if (x) POOL_TRACE(...); else' well-formed */
34 #define POOL_TRACE(args...)
40 VOID
MmPrintMemoryStatistic(VOID
);
42 #define NPOOL_REDZONE_CHECK /* check the block at deallocation */
43 // #define NPOOL_REDZONE_CHECK_FULL /* check all blocks at each allocation/deallocation */
44 #define NPOOL_REDZONE_SIZE 8 /* number of red zone bytes */
45 #define NPOOL_REDZONE_LOVALUE 0x87
46 #define NPOOL_REDZONE_HIVALUE 0xA5
49 /* avl types ****************************************************************/
52 * This declarations should be moved into a separate header file.
57 struct _NODE
* link
[2];
63 /* TYPES *******************************************************************/
65 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
66 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
69 * fields present at the start of a block (this is for internal use only)
75 struct _HDR
* previous
;
78 typedef struct _HDR_USED
84 LIST_ENTRY TagListEntry
;
85 #if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
89 } HDR_USED
, *PHDR_USED
;
91 typedef struct _HDR_FREE
95 } HDR_FREE
, *PHDR_FREE
;
97 #define HDR_FREE_SIZE ROUND_UP(sizeof(HDR_FREE), MM_POOL_ALIGNMENT)
99 #if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL) /* was '#defined(...)' — invalid token; matches the guard spelling used elsewhere in this file */
100 #define HDR_USED_SIZE ROUND_UP(sizeof(HDR_USED) + NPOOL_REDZONE_SIZE, MM_POOL_ALIGNMENT)
102 #define HDR_USED_SIZE ROUND_UP(sizeof(HDR_USED), MM_POOL_ALIGNMENT)
105 /* GLOBALS *****************************************************************/
107 extern PVOID MiNonPagedPoolStart
;
108 extern ULONG MiNonPagedPoolLength
;
111 * Head of the list of free blocks
113 static PNODE FreeBlockListRoot
= NULL
;
116 * Head of the list of in use block
118 static LIST_ENTRY UsedBlockListHead
;
120 static LIST_ENTRY AddressListHead
;
123 * Count of free blocks
125 static ULONG EiNrFreeBlocks
= 0;
128 * Count of used blocks
130 static ULONG EiNrUsedBlocks
= 0;
133 * Lock that protects the non-paged pool data structures
135 static KSPIN_LOCK MmNpoolLock
;
138 * Total memory used for free nonpaged pool blocks
140 ULONG EiFreeNonPagedPool
= 0;
143 * Total memory used for nonpaged pool blocks
145 ULONG EiUsedNonPagedPool
= 0;
147 /* Total quota for Non Paged Pool */
148 ULONG MmTotalNonPagedPoolQuota
= 0;
150 #ifdef TAG_STATISTICS_TRACKING
151 #define TAG_HASH_TABLE_SIZE (1024)
152 static LIST_ENTRY tag_hash_table
[TAG_HASH_TABLE_SIZE
];
153 #endif /* TAG_STATISTICS_TRACKING */
155 static PULONG MiNonPagedPoolAllocMap
;
156 static ULONG MiNonPagedPoolNrOfPages
;
158 /* avl helper functions ****************************************************/
160 void DumpFreeBlockNode(PNODE p
)
162 static int count
= 0;
169 DumpFreeBlockNode(p
->link
[0]);
170 blk
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
171 DbgPrint("%08x %8d (%d)\n", blk
, blk
->hdr
.Size
, count
);
172 DumpFreeBlockNode(p
->link
[1]);
177 void DumpFreeBlockTree(void)
179 DbgPrint("--- Begin tree ------------------\n");
180 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot
, HDR_FREE
, Node
));
181 DumpFreeBlockNode(FreeBlockListRoot
);
182 DbgPrint("--- End tree --------------------\n");
185 int compare_node(PNODE p1
, PNODE p2
)
187 HDR_FREE
* blk1
= CONTAINING_RECORD(p1
, HDR_FREE
, Node
);
188 HDR_FREE
* blk2
= CONTAINING_RECORD(p2
, HDR_FREE
, Node
);
190 if (blk1
->hdr
.Size
== blk2
->hdr
.Size
)
203 if (blk1
->hdr
.Size
< blk2
->hdr
.Size
)
207 if (blk1
->hdr
.Size
> blk2
->hdr
.Size
)
216 int compare_value(PVOID value
, PNODE p
)
218 HDR_FREE
* blk
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
219 ULONG v
= *(PULONG
)value
;
221 if (v
< blk
->hdr
.Size
)
225 if (v
> blk
->hdr
.Size
)
232 /* avl functions **********************************************************/
235 * The avl functions should be moved into a separate file.
238 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
240 void avl_insert (PNODE
* root
, PNODE n
, int (*compare
)(PNODE
, PNODE
))
242 PNODE y
; /* Top node to update balance factor, and parent. */
243 PNODE p
, q
; /* Iterator, and parent. */
244 PNODE w
; /* New root of rebalanced subtree. */
245 int dir
= 0; /* Direction to descend. */
247 n
->link
[0] = n
->link
[1] = n
->parent
= NULL
;
251 for (q
= NULL
, p
= *root
; p
!= NULL
; q
= p
, p
= p
->link
[dir
])
253 dir
= compare(n
, p
) > 0;
275 for (p
= n
; p
!= y
; p
= q
)
278 dir
= q
->link
[0] != p
;
289 if (y
->balance
== -2)
291 PNODE x
= y
->link
[0];
292 if (x
->balance
== -1)
295 y
->link
[0] = x
->link
[1];
297 x
->balance
= y
->balance
= 0;
298 x
->parent
= y
->parent
;
300 if (y
->link
[0] != NULL
)
302 y
->link
[0]->parent
= y
;
307 ASSERT(x
->balance
== +1);
309 x
->link
[1] = w
->link
[0];
311 y
->link
[0] = w
->link
[1];
313 if (w
->balance
== -1)
318 else if (w
->balance
== 0)
320 x
->balance
= y
->balance
= 0;
322 else /* |w->pavl_balance == +1| */
328 w
->parent
= y
->parent
;
329 x
->parent
= y
->parent
= w
;
330 if (x
->link
[1] != NULL
)
332 x
->link
[1]->parent
= x
;
334 if (y
->link
[0] != NULL
)
336 y
->link
[0]->parent
= y
;
340 else if (y
->balance
== +2)
342 PNODE x
= y
->link
[1];
343 if (x
->balance
== +1)
346 y
->link
[1] = x
->link
[0];
348 x
->balance
= y
->balance
= 0;
349 x
->parent
= y
->parent
;
351 if (y
->link
[1] != NULL
)
353 y
->link
[1]->parent
= y
;
358 ASSERT(x
->balance
== -1);
360 x
->link
[0] = w
->link
[1];
362 y
->link
[1] = w
->link
[0];
369 else if (w
->balance
== 0)
371 x
->balance
= y
->balance
= 0;
373 else /* |w->pavl_balance == -1| */
379 w
->parent
= y
->parent
;
380 x
->parent
= y
->parent
= w
;
381 if (x
->link
[0] != NULL
)
383 x
->link
[0]->parent
= x
;
385 if (y
->link
[1] != NULL
)
387 y
->link
[1]->parent
= y
;
395 if (w
->parent
!= NULL
)
397 w
->parent
->link
[y
!= w
->parent
->link
[0]] = w
;
407 void avl_remove (PNODE
*root
, PNODE item
, int (*compare
)(PNODE
, PNODE
))
409 PNODE p
; /* Traverses tree to find node to delete. */
410 PNODE q
; /* Parent of |p|. */
411 int dir
; /* Side of |q| on which |p| is linked. */
413 if (root
== NULL
|| *root
== NULL
)
427 dir
= compare(p
, q
) > 0;
430 if (p
->link
[1] == NULL
)
432 q
->link
[dir
] = p
->link
[0];
433 if (q
->link
[dir
] != NULL
)
435 q
->link
[dir
]->parent
= p
->parent
;
440 PNODE r
= p
->link
[1];
441 if (r
->link
[0] == NULL
)
443 r
->link
[0] = p
->link
[0];
445 r
->parent
= p
->parent
;
446 if (r
->link
[0] != NULL
)
448 r
->link
[0]->parent
= r
;
450 r
->balance
= p
->balance
;
456 PNODE s
= r
->link
[0];
457 while (s
->link
[0] != NULL
)
462 r
->link
[0] = s
->link
[1];
463 s
->link
[0] = p
->link
[0];
464 s
->link
[1] = p
->link
[1];
466 if (s
->link
[0] != NULL
)
468 s
->link
[0]->parent
= s
;
470 s
->link
[1]->parent
= s
;
471 s
->parent
= p
->parent
;
472 if (r
->link
[0] != NULL
)
474 r
->link
[0]->parent
= r
;
476 s
->balance
= p
->balance
;
482 item
->link
[0] = item
->link
[1] = item
->parent
= NULL
;
485 while (q
!= (PNODE
) root
)
489 if (y
->parent
!= NULL
)
500 dir
= q
->link
[0] != y
;
502 if (y
->balance
== +1)
506 else if (y
->balance
== +2)
508 PNODE x
= y
->link
[1];
509 if (x
->balance
== -1)
513 ASSERT(x
->balance
== -1);
515 x
->link
[0] = w
->link
[1];
517 y
->link
[1] = w
->link
[0];
519 if (w
->balance
== +1)
524 else if (w
->balance
== 0)
526 x
->balance
= y
->balance
= 0;
528 else /* |w->pavl_balance == -1| */
534 w
->parent
= y
->parent
;
535 x
->parent
= y
->parent
= w
;
536 if (x
->link
[0] != NULL
)
538 x
->link
[0]->parent
= x
;
540 if (y
->link
[1] != NULL
)
542 y
->link
[1]->parent
= y
;
548 y
->link
[1] = x
->link
[0];
550 x
->parent
= y
->parent
;
552 if (y
->link
[1] != NULL
)
554 y
->link
[1]->parent
= y
;
565 x
->balance
= y
->balance
= 0;
573 dir
= q
->link
[0] != y
;
575 if (y
->balance
== -1)
579 else if (y
->balance
== -2)
581 PNODE x
= y
->link
[0];
582 if (x
->balance
== +1)
585 ASSERT(x
->balance
== +1);
587 x
->link
[1] = w
->link
[0];
589 y
->link
[0] = w
->link
[1];
591 if (w
->balance
== -1)
596 else if (w
->balance
== 0)
598 x
->balance
= y
->balance
= 0;
600 else /* |w->pavl_balance == +1| */
606 w
->parent
= y
->parent
;
607 x
->parent
= y
->parent
= w
;
608 if (x
->link
[1] != NULL
)
610 x
->link
[1]->parent
= x
;
612 if (y
->link
[0] != NULL
)
614 y
->link
[0]->parent
= y
;
620 y
->link
[0] = x
->link
[1];
622 x
->parent
= y
->parent
;
624 if (y
->link
[0] != NULL
)
626 y
->link
[0]->parent
= y
;
637 x
->balance
= y
->balance
= 0;
647 PNODE _cdecl
avl_get_first(PNODE root
)
662 PNODE
avl_get_next(PNODE root
, PNODE p
)
677 while (q
&& q
->link
[1] == p
)
693 PNODE
avl_find_equal_or_greater(PNODE root
, ULONG size
, int (compare
)(PVOID
, PNODE
))
699 for (p
= root
; p
!= NULL
;)
701 cmp
= compare((PVOID
)&size
, p
);
715 cmp
= compare((PVOID
)&size
, p
->link
[0]);
728 /* non paged pool functions ************************************************/
730 #ifdef TAG_STATISTICS_TRACKING
732 MiRemoveFromTagHashTable(HDR_USED
* block
)
734 * Remove a block from the tag hash table
741 RemoveEntryList(&block
->TagListEntry
);
745 MiAddToTagHashTable(HDR_USED
* block
)
747 * Add a block to the tag hash table
757 hash
= block
->Tag
% TAG_HASH_TABLE_SIZE
;
759 InsertHeadList(&tag_hash_table
[hash
], &block
->TagListEntry
);
761 #endif /* TAG_STATISTICS_TRACKING */
763 #if defined(TAG_STATISTICS_TRACKING)
765 MiDumpTagStats(ULONG CurrentTag
, ULONG CurrentNrBlocks
, ULONG CurrentSize
)
769 c1
= (CHAR
)((CurrentTag
>> 24) & 0xFF);
770 c2
= (CHAR
)((CurrentTag
>> 16) & 0xFF);
771 c3
= (CHAR
)((CurrentTag
>> 8) & 0xFF);
772 c4
= (CHAR
)(CurrentTag
& 0xFF);
774 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
776 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
777 CurrentTag
, c4
, c3
, c2
, c1
, CurrentNrBlocks
,
778 CurrentSize
, CurrentSize
/ CurrentNrBlocks
);
782 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d ",
783 CurrentTag
, CurrentNrBlocks
, CurrentSize
,
784 CurrentSize
/ CurrentNrBlocks
);
785 KeRosPrintAddress((PVOID
)CurrentTag
);
789 #endif /* defined(TAG_STATISTICS_TRACKING) */
793 MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly
)
795 #if defined(TAG_STATISTICS_TRACKING)
799 ULONG CurrentNrBlocks
= 0;
800 ULONG CurrentSize
= 0;
804 LIST_ENTRY tmpListHead
;
805 PLIST_ENTRY current_entry
;
807 DbgPrint("******* Dumping non paging pool stats ******\n");
810 for (i
= 0; i
< TAG_HASH_TABLE_SIZE
; i
++)
812 InitializeListHead(&tmpListHead
);
814 while (!IsListEmpty(&tag_hash_table
[i
]))
818 current_entry
= tag_hash_table
[i
].Flink
;
819 while (current_entry
!= &tag_hash_table
[i
])
821 current
= CONTAINING_RECORD(current_entry
, HDR_USED
, TagListEntry
);
822 current_entry
= current_entry
->Flink
;
825 CurrentTag
= current
->Tag
;
829 if (current
->Tag
== CurrentTag
)
831 RemoveEntryList(&current
->TagListEntry
);
832 InsertHeadList(&tmpListHead
, &current
->TagListEntry
);
833 if (!NewOnly
|| !current
->Dumped
)
837 CurrentSize
+= current
->hdr
.Size
;
838 TotalSize
+= current
->hdr
.Size
;
839 current
->Dumped
= TRUE
;
843 if (CurrentTag
!= 0 && CurrentNrBlocks
!= 0)
845 MiDumpTagStats(CurrentTag
, CurrentNrBlocks
, CurrentSize
);
848 if (!IsListEmpty(&tmpListHead
))
850 tag_hash_table
[i
].Flink
= tmpListHead
.Flink
;
851 tag_hash_table
[i
].Flink
->Blink
= &tag_hash_table
[i
];
852 tag_hash_table
[i
].Blink
= tmpListHead
.Blink
;
853 tag_hash_table
[i
].Blink
->Flink
= &tag_hash_table
[i
];
856 if (TotalBlocks
!= 0)
858 DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
859 TotalBlocks
, TotalSize
, TotalSize
/ TotalBlocks
);
863 DbgPrint("TotalBlocks %d TotalSize %d\n",
864 TotalBlocks
, TotalSize
);
866 Size
= EiFreeNonPagedPool
- (MiNonPagedPoolLength
- MiNonPagedPoolNrOfPages
* PAGE_SIZE
);
867 DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
868 EiNrFreeBlocks
, Size
, EiNrFreeBlocks
? Size
/ EiNrFreeBlocks
: 0);
869 DbgPrint("***************** Dump Complete ***************\n");
870 #endif /* defined(TAG_STATISTICS_TRACKING) */
875 MiDebugDumpNonPagedPool(BOOLEAN NewOnly
)
877 #if defined(POOL_DEBUG_APIS)
879 PLIST_ENTRY current_entry
;
882 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
884 DbgPrint("******* Dumping non paging pool contents ******\n");
885 current_entry
= UsedBlockListHead
.Flink
;
886 while (current_entry
!= &UsedBlockListHead
)
888 current
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
889 if (!NewOnly
|| !current
->Dumped
)
893 c1
= (CHAR
)((current
->Tag
>> 24) & 0xFF);
894 c2
= (CHAR
)((current
->Tag
>> 16) & 0xFF);
895 c3
= (CHAR
)((current
->Tag
>> 8) & 0xFF);
896 c4
= (CHAR
)(current
->Tag
& 0xFF);
898 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
900 DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
901 current
->hdr
.Size
, current
->Tag
, c4
, c3
, c2
, c1
,
906 DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
907 current
->hdr
.Size
, current
->Tag
, current
->Caller
);
909 current
->Dumped
= TRUE
;
911 current_entry
= current_entry
->Flink
;
913 DbgPrint("***************** Dump Complete ***************\n");
914 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
918 #ifdef ENABLE_VALIDATE_POOL
919 static void validate_free_list(void)
921 * FUNCTION: Validate the integrity of the list of free blocks
925 unsigned int blocks_seen
=0;
928 p
= avl_get_first(FreeBlockListRoot
);
934 current
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
935 base_addr
= (PVOID
)current
;
937 if (current
->hdr
.Magic
!= BLOCK_HDR_FREE_MAGIC
)
939 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
941 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
944 if (base_addr
< MiNonPagedPoolStart
||
945 (ULONG_PTR
)base_addr
+ current
->hdr
.Size
> (ULONG_PTR
)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
947 DbgPrint("Block %x found outside pool area\n",current
);
948 DbgPrint("Size %d\n",current
->hdr
.Size
);
949 DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart
,
950 (ULONG_PTR
)MiNonPagedPoolStart
+MiNonPagedPoolLength
);
951 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
954 if (blocks_seen
> EiNrFreeBlocks
)
956 DbgPrint("Too many blocks on free list\n");
957 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
959 p
= avl_get_next(FreeBlockListRoot
, p
);
963 static void validate_used_list(void)
965 * FUNCTION: Validate the integrity of the list of used blocks
969 PLIST_ENTRY current_entry
;
970 unsigned int blocks_seen
=0;
972 current_entry
= UsedBlockListHead
.Flink
;
973 while (current_entry
!= &UsedBlockListHead
)
977 current
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
978 base_addr
= (PVOID
)current
;
980 if (current
->hdr
.Magic
!= BLOCK_HDR_USED_MAGIC
)
982 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
984 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
986 if (base_addr
< MiNonPagedPoolStart
||
987 ((ULONG_PTR
)base_addr
+current
->hdr
.Size
) >
988 (ULONG_PTR
)MiNonPagedPoolStart
+MiNonPagedPoolLength
)
990 DbgPrint("Block %x found outside pool area\n",current
);
991 DbgPrint("Size %d\n",current
->hdr
.Size
);
992 DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart
,
993 (ULONG_PTR
)MiNonPagedPoolStart
+MiNonPagedPoolLength
);
994 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
997 if (blocks_seen
> EiNrUsedBlocks
)
999 DbgPrint("Too many blocks on used list\n");
1000 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1002 if (current
->ListEntry
.Flink
!= &UsedBlockListHead
&&
1003 current
->ListEntry
.Flink
->Blink
!= &current
->ListEntry
)
1005 DbgPrint("%s:%d:Break in list (current %x next %x "
1006 "current->next->previous %x)\n",
1007 __FILE__
,__LINE__
,current
, current
->ListEntry
.Flink
,
1008 current
->ListEntry
.Flink
->Blink
);
1009 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1012 current_entry
= current_entry
->Flink
;
1016 static void check_duplicates(HDR
* blk
)
1018 * FUNCTION: Check a block has no duplicates
1020 * blk = block to check
1021 * NOTE: Bug checks if duplicates are found
1024 ULONG_PTR base
= (ULONG_PTR
)blk
;
1025 ULONG_PTR last
= (ULONG_PTR
)blk
+ blk
->Size
;
1026 PLIST_ENTRY current_entry
;
1031 p
= avl_get_first(FreeBlockListRoot
);
1035 free
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
1036 if (free
->hdr
.Magic
!= BLOCK_HDR_FREE_MAGIC
)
1038 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
1040 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1043 if ( (ULONG_PTR
)free
> base
&& (ULONG_PTR
)free
< last
)
1045 DbgPrint("intersecting blocks on list\n");
1046 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1048 if ( (ULONG_PTR
)free
< base
&&
1049 ((ULONG_PTR
)free
+ free
->hdr
.Size
) > base
)
1051 DbgPrint("intersecting blocks on list\n");
1052 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1054 p
= avl_get_next(FreeBlockListRoot
, p
);
1057 current_entry
= UsedBlockListHead
.Flink
;
1058 while (current_entry
!= &UsedBlockListHead
)
1060 used
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
1062 if ( (ULONG_PTR
)used
> base
&& (ULONG_PTR
)used
< last
)
1064 DbgPrint("intersecting blocks on list\n");
1065 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1067 if ( (ULONG_PTR
)used
< base
&&
1068 ((ULONG_PTR
)used
+ used
->hdr
.Size
) > base
)
1070 DbgPrint("intersecting blocks on list\n");
1071 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1074 current_entry
= current_entry
->Flink
;
1079 static void validate_kernel_pool(void)
1081 * FUNCTION: Checks the integrity of the kernel memory heap
1086 PLIST_ENTRY current_entry
;
1089 validate_free_list();
1090 validate_used_list();
1092 p
= avl_get_first(FreeBlockListRoot
);
1095 free
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
1096 check_duplicates(&free
->hdr
);
1097 p
= avl_get_next(FreeBlockListRoot
, p
);
1099 current_entry
= UsedBlockListHead
.Flink
;
1100 while (current_entry
!= &UsedBlockListHead
)
1102 used
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
1103 check_duplicates(&used
->hdr
);
1104 current_entry
= current_entry
->Flink
;
1111 free_pages(HDR_FREE
* blk
)
1116 start
= (ULONG_PTR
)blk
;
1117 end
= (ULONG_PTR
)blk
+ blk
->hdr
.Size
;
1120 * If the block doesn't contain a whole page then there is nothing to do
1122 if (PAGE_ROUND_UP(start
) >= PAGE_ROUND_DOWN(end
))
1129 static void remove_from_used_list(HDR_USED
* current
)
1131 RemoveEntryList(&current
->ListEntry
);
1132 EiUsedNonPagedPool
-= current
->hdr
.Size
;
1136 static void remove_from_free_list(HDR_FREE
* current
)
1138 DPRINT("remove_from_free_list %d\n", current
->hdr
.Size
);
1140 avl_remove(&FreeBlockListRoot
, &current
->Node
, compare_node
);
1142 EiFreeNonPagedPool
-= current
->hdr
.Size
;
1144 DPRINT("remove_from_free_list done\n");
1147 DumpFreeBlockTree();
1152 add_to_free_list(HDR_FREE
* blk
)
1154 * FUNCTION: add the block to the free list (internal)
1158 BOOL UpdatePrevPtr
= FALSE
;
1160 DPRINT("add_to_free_list (%d)\n", blk
->hdr
.Size
);
1164 current
= (HDR_FREE
*)blk
->hdr
.previous
;
1165 if (current
&& current
->hdr
.Magic
== BLOCK_HDR_FREE_MAGIC
)
1167 remove_from_free_list(current
);
1168 current
->hdr
.Size
= current
->hdr
.Size
+ blk
->hdr
.Size
;
1169 current
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1170 memset(blk
, 0xcc, HDR_USED_SIZE
);
1172 UpdatePrevPtr
= TRUE
;
1175 current
= (HDR_FREE
*)((ULONG_PTR
)blk
+ blk
->hdr
.Size
);
1176 if ((char*)current
< (char*)MiNonPagedPoolStart
+ MiNonPagedPoolLength
&&
1177 current
->hdr
.Magic
== BLOCK_HDR_FREE_MAGIC
)
1179 remove_from_free_list(current
);
1180 blk
->hdr
.Size
+= current
->hdr
.Size
;
1181 memset(current
, 0xcc, HDR_FREE_SIZE
);
1182 UpdatePrevPtr
= TRUE
;
1183 current
= (HDR_FREE
*)((ULONG_PTR
)blk
+ blk
->hdr
.Size
);
1185 if (UpdatePrevPtr
&&
1186 (char*)current
< (char*)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1188 current
->hdr
.previous
= &blk
->hdr
;
1190 DPRINT("%d\n", blk
->hdr
.Size
);
1191 blk
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1192 EiFreeNonPagedPool
+= blk
->hdr
.Size
;
1193 avl_insert(&FreeBlockListRoot
, &blk
->Node
, compare_node
);
1194 DPRINT("add_to_free_list done\n");
1197 DumpFreeBlockTree();
1201 static void add_to_used_list(HDR_USED
* blk
)
1203 * FUNCTION: add the block to the used list (internal)
1206 InsertHeadList(&UsedBlockListHead
, &blk
->ListEntry
);
1207 EiUsedNonPagedPool
+= blk
->hdr
.Size
;
1213 grow_block(HDR_FREE
* blk
, PVOID end
)
1217 ULONG_PTR StartIndex
, EndIndex
;
1220 StartIndex
= (ULONG_PTR
)(PAGE_ROUND_UP((ULONG_PTR
)blk
+ HDR_FREE_SIZE
- (ULONG_PTR
)MiNonPagedPoolStart
)) / PAGE_SIZE
;
1221 EndIndex
= ((ULONG_PTR
)PAGE_ROUND_UP(end
) - (ULONG_PTR
)MiNonPagedPoolStart
) / PAGE_SIZE
;
1224 for (i
= StartIndex
; i
< EndIndex
; i
++)
1226 if (!(MiNonPagedPoolAllocMap
[i
/ 32] & (1 << (i
% 32))))
1228 for (j
= i
+ 1; j
< EndIndex
&& j
- i
< 32; j
++)
1230 if (MiNonPagedPoolAllocMap
[j
/ 32] & (1 << (j
% 32)))
1235 for (k
= 0; k
< j
- i
; k
++)
1237 Status
= MmRequestPageMemoryConsumer(MC_NPPOOL
, FALSE
, &Page
[k
]);
1238 if (!NT_SUCCESS(Status
))
1240 for (i
= 0; i
< k
; i
++)
1242 MmReleasePageMemoryConsumer(MC_NPPOOL
, Page
[i
]);
1247 Status
= MmCreateVirtualMapping(NULL
,
1248 (PVOID
)((ULONG_PTR
)MiNonPagedPoolStart
+ i
* PAGE_SIZE
),
1249 PAGE_READWRITE
|PAGE_SYSTEM
,
1252 if (!NT_SUCCESS(Status
))
1254 for (i
= 0; i
< k
; i
++)
1256 MmReleasePageMemoryConsumer(MC_NPPOOL
, Page
[i
]);
1260 for (j
= i
; j
< k
+ i
; j
++)
1262 MiNonPagedPoolAllocMap
[j
/ 32] |= (1 << (j
% 32));
1264 MiNonPagedPoolNrOfPages
+= k
;
1271 static HDR_USED
* get_block(unsigned int size
, unsigned long alignment
)
1273 HDR_FREE
*blk
, *current
, *previous
= NULL
, *next
= NULL
, *best
= NULL
;
1274 ULONG previous_size
= 0, current_size
, next_size
= 0, new_size
;
1276 PVOID addr
, aligned_addr
, best_aligned_addr
=NULL
;
1279 DPRINT("get_block %d\n", size
);
1281 p
= avl_find_equal_or_greater(FreeBlockListRoot
, size
+ HDR_USED_SIZE
, compare_value
);
1284 current
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
1285 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1286 /* calculate first aligned address available within this block */
1287 aligned_addr
= alignment
> 0 ? MM_ROUND_UP(addr
, alignment
) : addr
;
1288 if (size
< PAGE_SIZE
)
1290 /* check that the block is in one page */
1291 if (PAGE_ROUND_DOWN(aligned_addr
) != PAGE_ROUND_DOWN((ULONG_PTR
)aligned_addr
+ size
- 1))
1293 aligned_addr
= (PVOID
)PAGE_ROUND_UP(aligned_addr
);
1296 DPRINT("%x %x\n", addr
, aligned_addr
);
1297 if (aligned_addr
!= addr
)
1299 while((ULONG_PTR
)aligned_addr
- (ULONG_PTR
)addr
< HDR_FREE_SIZE
)
1303 aligned_addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
+ HDR_FREE_SIZE
);
1307 aligned_addr
= MM_ROUND_UP((PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
+ HDR_FREE_SIZE
), alignment
);
1309 if (size
< PAGE_SIZE
)
1311 /* check that the block is in one page */
1312 if (PAGE_ROUND_DOWN(aligned_addr
) != PAGE_ROUND_DOWN((ULONG_PTR
)aligned_addr
+ size
- 1))
1314 aligned_addr
= (PVOID
)PAGE_ROUND_UP(aligned_addr
);
1319 DPRINT("%x %x\n", addr
, aligned_addr
);
1320 new_size
= (ULONG_PTR
)aligned_addr
- (ULONG_PTR
)addr
+ size
;
1321 if (current
->hdr
.Size
>= new_size
+ HDR_USED_SIZE
&&
1322 (best
== NULL
|| current
->hdr
.Size
< best
->hdr
.Size
))
1325 best_aligned_addr
= aligned_addr
;
1326 if (new_size
<= size
+ 2 * HDR_FREE_SIZE
)
1334 if (size
< PAGE_SIZE
)
1336 if (current
->hdr
.Size
>= 2 * PAGE_SIZE
+ HDR_FREE_SIZE
)
1343 if (current
->hdr
.Size
>= size
+ alignment
+ HDR_FREE_SIZE
)
1349 p
= avl_get_next(FreeBlockListRoot
, p
);
1352 * We didn't find anything suitable at all.
1359 DPRINT(":: blk %x blk->hdr.Size %d (%d) Size %d\n", best
, best
->hdr
.Size
, best
->hdr
.Size
- HDR_USED_SIZE
, size
);
1362 current_size
= current
->hdr
.Size
- HDR_USED_SIZE
;
1363 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1364 if (addr
!= best_aligned_addr
)
1366 blk
= (HDR_FREE
*)((ULONG_PTR
)best_aligned_addr
- HDR_USED_SIZE
);
1368 * if size-aligned, break off the preceding bytes into their own block...
1371 previous_size
= (ULONG_PTR
)blk
- (ULONG_PTR
)previous
- HDR_FREE_SIZE
;
1373 current_size
-= ((ULONG_PTR
)current
- (ULONG_PTR
)previous
);
1375 end
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
+ size
);
1377 if (current_size
>= size
+ HDR_FREE_SIZE
+ MM_POOL_ALIGNMENT
)
1379 /* create a new free block after our block, if the memory size is >= 4 byte for this block */
1380 next
= (HDR_FREE
*)((ULONG_PTR
)current
+ size
+ HDR_USED_SIZE
);
1381 next_size
= current_size
- size
- HDR_FREE_SIZE
;
1382 current_size
= size
;
1383 end
= (PVOID
)((ULONG_PTR
)next
+ HDR_FREE_SIZE
);
1388 remove_from_free_list(previous
);
1389 if (!grow_block(previous
, end
))
1391 add_to_free_list(previous
);
1394 memset(current
, 0, HDR_USED_SIZE
);
1395 current
->hdr
.Size
= current_size
+ HDR_USED_SIZE
;
1396 current
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1397 current
->hdr
.previous
= &previous
->hdr
;
1398 previous
->hdr
.Size
= previous_size
+ HDR_FREE_SIZE
;
1401 blk
= (HDR_FREE
*)((ULONG_PTR
)current
+ current
->hdr
.Size
);
1402 if ((ULONG_PTR
)blk
< (ULONG_PTR
)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1404 blk
->hdr
.previous
= &current
->hdr
;
1408 add_to_free_list(previous
);
1412 remove_from_free_list(current
);
1414 if (!grow_block(current
, end
))
1416 add_to_free_list(current
);
1419 current
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1422 current
->hdr
.Size
= current_size
+ HDR_USED_SIZE
;
1428 memset(next
, 0, HDR_FREE_SIZE
);
1429 next
->hdr
.Size
= next_size
+ HDR_FREE_SIZE
;
1430 next
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1431 next
->hdr
.previous
= &current
->hdr
;
1432 blk
= (HDR_FREE
*)((ULONG_PTR
)next
+ next
->hdr
.Size
);
1433 if ((ULONG_PTR
)blk
< (ULONG_PTR
)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1435 blk
->hdr
.previous
= &next
->hdr
;
1437 add_to_free_list(next
);
1440 add_to_used_list((HDR_USED
*)current
);
1443 if (size
< PAGE_SIZE
)
1445 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1446 if (PAGE_ROUND_DOWN(addr
) != PAGE_ROUND_DOWN((PVOID
)((ULONG_PTR
)addr
+ size
- 1)))
1448 DPRINT1("%x %x\n", addr
, (ULONG_PTR
)addr
+ size
);
1450 ASSERT (PAGE_ROUND_DOWN(addr
) == PAGE_ROUND_DOWN((PVOID
)((ULONG_PTR
)addr
+ size
- 1)));
1454 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1455 ASSERT(MM_ROUND_UP(addr
, alignment
) == addr
);
1457 return (HDR_USED
*)current
;
1461 ExRosQueryNonPagedPoolTag ( PVOID Addr
)
1463 HDR_USED
* blk
=(HDR_USED
*)((ULONG_PTR
)Addr
- HDR_USED_SIZE
);
1464 if (blk
->hdr
.Magic
!= BLOCK_HDR_USED_MAGIC
)
1470 #if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
1471 void check_redzone_header(HDR_USED
* hdr
)
1473 PBYTE LoZone
= (PBYTE
)((ULONG_PTR
)hdr
+ HDR_USED_SIZE
- NPOOL_REDZONE_SIZE
);
1474 PBYTE HiZone
= (PBYTE
)((ULONG_PTR
)hdr
+ HDR_USED_SIZE
+ hdr
->UserSize
);
1480 for (i
= 0; i
< NPOOL_REDZONE_SIZE
; i
++)
1482 if (LoZone
[i
] != NPOOL_REDZONE_LOVALUE
)
1486 if (HiZone
[i
] != NPOOL_REDZONE_HIVALUE
)
1494 c
[0] = (CHAR
)((hdr
->Tag
>> 24) & 0xFF);
1495 c
[1] = (CHAR
)((hdr
->Tag
>> 16) & 0xFF);
1496 c
[2] = (CHAR
)((hdr
->Tag
>> 8) & 0xFF);
1497 c
[3] = (CHAR
)(hdr
->Tag
& 0xFF);
1500 if (!isprint(c
[0]) || !isprint(c
[1]) || !isprint(c
[2]) || !isprint(c
[3]))
1507 DbgPrint("NPOOL: Low-side redzone overwritten, Block %x, Size %d, Tag %x(%s), Caller %x\n",
1508 (ULONG_PTR
)hdr
+ HDR_USED_SIZE
, hdr
->UserSize
, hdr
->Tag
, c
, hdr
->Caller
);
1512 DbgPrint("NPOOL: High-side redzone overwritten, Block %x, Size %d, Tag %x(%s), Caller %x\n",
1513 (ULONG_PTR
)hdr
+ HDR_USED_SIZE
, hdr
->UserSize
, hdr
->Tag
, c
, hdr
->Caller
);
1520 #ifdef NPOOL_REDZONE_CHECK_FULL
1521 void check_redzone_list(void)
1523 PLIST_ENTRY current_entry
;
1525 current_entry
= UsedBlockListHead
.Flink
;
1526 while (current_entry
!= &UsedBlockListHead
)
1528 check_redzone_header(CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
));
1529 current_entry
= current_entry
->Flink
;
1535 VOID STDCALL
ExFreeNonPagedPool (PVOID block
)
1537 * FUNCTION: Releases previously allocated memory
1539 * block = block to free
1542 HDR_USED
* blk
=(HDR_USED
*)((ULONG_PTR
)block
- HDR_USED_SIZE
);
1550 DPRINT("freeing block %x\n",blk
);
1552 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block
,blk
->hdr
.Size
,
1554 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
1557 if (blk
->hdr
.Magic
!= BLOCK_HDR_USED_MAGIC
)
1559 if (blk
->hdr
.Magic
== BLOCK_HDR_FREE_MAGIC
)
1561 DbgPrint("ExFreePool of already freed address %x\n", block
);
1565 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1566 block
, blk
->hdr
.Magic
);
1572 #if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
1573 check_redzone_header(blk
);
1576 #ifdef NPOOL_REDZONE_CHECK_FULL
1577 check_redzone_list();
1580 memset(block
, 0xcc, blk
->hdr
.Size
- HDR_USED_SIZE
);
1582 #ifdef TAG_STATISTICS_TRACKING
1583 MiRemoveFromTagHashTable(blk
);
1586 remove_from_used_list(blk
);
1587 blk
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1588 add_to_free_list((HDR_FREE
*)blk
);
1590 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1594 ExAllocateNonPagedPoolWithTag(POOL_TYPE Type
, ULONG Size
, ULONG Tag
, PVOID Caller
)
1596 #if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
1600 HDR_USED
* best
= NULL
;
1604 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1607 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
1609 #ifdef NPOOL_REDZONE_CHECK_FULL
1610 check_redzone_list();
1616 /* after some allocations print the npaged pool stats */
1617 #ifdef TAG_STATISTICS_TRACKING
1620 static ULONG counter
= 0;
1621 if (counter
++ % 100000 == 0)
1623 MiDebugDumpNonPagedPoolStats(FALSE
);
1624 MmPrintMemoryStatistic();
1630 * accommodate this useful idiom
1634 POOL_TRACE("= NULL\n");
1635 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1638 /* Make the size dword alligned, this makes the block dword alligned */
1639 #if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
1641 Size
= ROUND_UP(Size
+ NPOOL_REDZONE_SIZE
, MM_POOL_ALIGNMENT
);
1643 Size
= ROUND_UP(Size
, MM_POOL_ALIGNMENT
);
1646 if (Size
>= PAGE_SIZE
)
1648 alignment
= PAGE_SIZE
;
1650 else if (Type
& CACHE_ALIGNED_POOL_MASK
)
1652 alignment
= MM_CACHE_LINE_SIZE
;
1659 best
= get_block(Size
, alignment
);
1662 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1663 DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
1665 KeRosDumpStackFrames(NULL
, 10);
1669 best
->Caller
= Caller
;
1670 best
->Dumped
= FALSE
;
1671 best
->TagListEntry
.Flink
= best
->TagListEntry
.Blink
= NULL
;
1672 #if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
1673 best
->UserSize
= UserSize
;
1674 memset((PVOID
)((ULONG_PTR
)best
+ HDR_USED_SIZE
- NPOOL_REDZONE_SIZE
), NPOOL_REDZONE_LOVALUE
, NPOOL_REDZONE_SIZE
);
1675 memset((PVOID
)((ULONG_PTR
)best
+ HDR_USED_SIZE
+ UserSize
), NPOOL_REDZONE_HIVALUE
, NPOOL_REDZONE_SIZE
);
1678 #ifdef TAG_STATISTICS_TRACKING
1680 MiAddToTagHashTable(best
);
1683 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1684 block
= (PVOID
)((ULONG_PTR
)best
+ HDR_USED_SIZE
);
1685 /* RtlZeroMemory(block, Size);*/
1692 MiInitializeNonPagedPool(VOID
)
1700 #ifdef TAG_STATISTICS_TRACKING
1702 for (i
= 0; i
< TAG_HASH_TABLE_SIZE
; i
++)
1704 InitializeListHead(&tag_hash_table
[i
]);
1707 KeInitializeSpinLock(&MmNpoolLock
);
1708 InitializeListHead(&UsedBlockListHead
);
1709 InitializeListHead(&AddressListHead
);
1710 FreeBlockListRoot
= NULL
;
1712 MiNonPagedPoolAllocMap
= (PVOID
)((ULONG_PTR
)MiNonPagedPoolStart
+ PAGE_SIZE
);
1713 #if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
1714 MiNonPagedPoolNrOfPages
= ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
, 32) / 8;
1715 MiNonPagedPoolNrOfPages
= ROUND_UP(MiNonPagedPoolNrOfPages
+ NPOOL_REDZONE_SIZE
, MM_POOL_ALIGNMENT
);
1716 MiNonPagedPoolNrOfPages
= PAGE_ROUND_UP(MiNonPagedPoolNrOfPages
+ HDR_FREE_SIZE
) + PAGE_SIZE
;
1718 MiNonPagedPoolNrOfPages
= PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
, 32) / 8 + HDR_FREE_SIZE
) + PAGE_SIZE
;
1720 MiNonPagedPoolNrOfPages
/= PAGE_SIZE
;
1721 Address
= MiNonPagedPoolStart
;
1723 for (i
= 0; i
< MiNonPagedPoolNrOfPages
; i
++)
1725 Status
= MmRequestPageMemoryConsumer(MC_NPPOOL
, FALSE
, &Page
);
1726 if (!NT_SUCCESS(Status
))
1728 DbgPrint("Unable to allocate a page\n");
1732 Status
= MmCreateVirtualMapping(NULL
,
1734 PAGE_READWRITE
|PAGE_SYSTEM
,
1737 if (!NT_SUCCESS(Status
))
1739 DbgPrint("Unable to create virtual mapping\n");
1742 Address
= (PVOID
)((ULONG_PTR
)Address
+ PAGE_SIZE
);
1745 for (i
= 0; i
< MiNonPagedPoolNrOfPages
; i
++)
1747 MiNonPagedPoolAllocMap
[i
/ 32] |= (1 << (i
% 32));
1750 /* the first block is free */
1751 free
= (HDR_FREE
*)MiNonPagedPoolStart
;
1752 free
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1753 free
->hdr
.Size
= PAGE_SIZE
- HDR_USED_SIZE
;
1754 free
->hdr
.previous
= NULL
;
1755 memset((PVOID
)((ULONG_PTR
)free
+ HDR_FREE_SIZE
), 0xcc, free
->hdr
.Size
- HDR_FREE_SIZE
);
1756 add_to_free_list(free
);
1758 /* the second block contains the non paged pool bitmap */
1759 used
= (HDR_USED
*)((ULONG_PTR
)free
+ free
->hdr
.Size
);
1760 used
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1761 #if defined(NPOOL_REDZONE_CHECK) || defined(NPOOL_REDZONE_CHECK_FULL)
1762 used
->UserSize
= ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
, 32) / 8;
1763 used
->hdr
.Size
= ROUND_UP(used
->UserSize
+ NPOOL_REDZONE_SIZE
, MM_POOL_ALIGNMENT
) + HDR_USED_SIZE
;
1764 memset((PVOID
)((ULONG_PTR
)used
+ HDR_USED_SIZE
- NPOOL_REDZONE_SIZE
), NPOOL_REDZONE_LOVALUE
, NPOOL_REDZONE_SIZE
);
1765 memset((PVOID
)((ULONG_PTR
)used
+ HDR_USED_SIZE
+ used
->UserSize
), NPOOL_REDZONE_HIVALUE
, NPOOL_REDZONE_SIZE
);
1767 used
->hdr
.Size
= ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
, 32) / 8 + HDR_USED_SIZE
;
1769 used
->hdr
.previous
= &free
->hdr
;
1770 used
->Tag
= 0xffffffff;
1771 used
->Caller
= (PVOID
)MiInitializeNonPagedPool
;
1772 used
->Dumped
= FALSE
;
1773 add_to_used_list(used
);
1774 #ifdef TAG_STATISTICS_TRACKING
1775 MiAddToTagHashTable(used
);
1778 /* the third block is the free block after the bitmap */
1779 free
= (HDR_FREE
*)((ULONG_PTR
)used
+ used
->hdr
.Size
);
1780 free
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1781 free
->hdr
.Size
= MiNonPagedPoolLength
- ((ULONG_PTR
)free
- (ULONG_PTR
)MiNonPagedPoolStart
);
1782 free
->hdr
.previous
= &used
->hdr
;
1783 memset((PVOID
)((ULONG_PTR
)free
+ HDR_FREE_SIZE
), 0xcc, (ULONG_PTR
)Address
- (ULONG_PTR
)free
- HDR_FREE_SIZE
);
1784 add_to_free_list(free
);
1789 MiAllocateSpecialPool (IN POOL_TYPE PoolType
,
1790 IN SIZE_T NumberOfBytes
,
1795 /* FIXME: Special Pools not Supported */
1796 DbgPrint("Special Pools not supported\n");