/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/npool.c
 * PURPOSE:     Implements the kernel memory pool
 * PROGRAMMERS: David Welch (welch@cwcom.net)
 *              Iwan Fatahi (i_fatahi@hotmail.com)
 *              Robert Bergkvist (fragdance@hotmail.com)
 */
/* INCLUDES ****************************************************************/

#include <internal/debug.h>

#ifdef ENABLE_VALIDATE_POOL
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
#else
#define POOL_TRACE(args...)
#endif
/* avl types ****************************************************************/

/* FIXME:
 *   These declarations should be moved into a separate header file.
 */

typedef struct _NODE
{
   struct _NODE* link[2];
   struct _NODE* parent;
   char balance;
}
NODE, *PNODE;
/* TYPES *******************************************************************/

#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)

/*
 * fields present at the start of a block (this is for internal use only)
 */
typedef struct _HDR
{
   ULONG Magic;
   ULONG Size;
   struct _HDR* previous;
}
HDR, *PHDR;

typedef struct _HDR_USED
{
   HDR hdr;
   LIST_ENTRY ListEntry;
   ULONG Tag;
   PVOID Caller;
   LIST_ENTRY TagListEntry;
   BOOLEAN Dumped;
}
HDR_USED, *PHDR_USED;

typedef struct _HDR_FREE
{
   HDR hdr;
   NODE Node;
}
HDR_FREE, *PHDR_FREE;

#define HDR_FREE_SIZE ROUND_UP(sizeof(HDR_FREE), MM_POOL_ALIGNMENT)
#define HDR_USED_SIZE ROUND_UP(sizeof(HDR_USED), MM_POOL_ALIGNMENT)
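
/*
 * Layout note (derived from the definitions above): every allocation is
 * preceded by an HDR_USED header and hdr.Size covers the header plus the
 * user-visible bytes, so header and user pointer are recovered from each
 * other like this:
 *
 *   PVOID     user = (PVOID)((ULONG_PTR)hdr + HDR_USED_SIZE);
 *   HDR_USED* hdr  = (HDR_USED*)((ULONG_PTR)user - HDR_USED_SIZE);
 *
 * Free blocks reuse the same space as an HDR_FREE whose Node links them into
 * the size-ordered AVL tree rooted at FreeBlockListRoot.
 */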
/* GLOBALS *****************************************************************/

extern PVOID MiNonPagedPoolStart;
extern ULONG MiNonPagedPoolLength;

/*
 * Head of the list of free blocks
 */
static PNODE FreeBlockListRoot = NULL;

/*
 * Head of the list of in use blocks
 */
static LIST_ENTRY UsedBlockListHead;

static LIST_ENTRY AddressListHead;

/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;

/*
 * Lock that protects the non-paged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/* Total quota for Non Paged Pool */
ULONG MmTotalNonPagedPoolQuota = 0;

/*
 * Allocate a range of memory in the nonpaged pool
 */
PVOID
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

VOID
MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */

static PULONG MiNonPagedPoolAllocMap;
static ULONG MiNonPagedPoolNrOfPages;
/* avl helper functions ****************************************************/

void DumpFreeBlockNode(PNODE p)
{
   static int count = 0;
   HDR_FREE* blk;

   count++;

   if (p)
   {
      DumpFreeBlockNode(p->link[0]);
      blk = CONTAINING_RECORD(p, HDR_FREE, Node);
      DbgPrint("%08x %8d (%d)\n", blk, blk->hdr.Size, count);
      DumpFreeBlockNode(p->link[1]);
   }

   count--;
}
void DumpFreeBlockTree(void)
{
   DbgPrint("--- Begin tree ------------------\n");
   DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, HDR_FREE, Node));
   DumpFreeBlockNode(FreeBlockListRoot);
   DbgPrint("--- End tree --------------------\n");
}
int compare_node(PNODE p1, PNODE p2)
{
   HDR_FREE* blk1 = CONTAINING_RECORD(p1, HDR_FREE, Node);
   HDR_FREE* blk2 = CONTAINING_RECORD(p2, HDR_FREE, Node);

   if (blk1->hdr.Size == blk2->hdr.Size)
   {
      /* Blocks of equal size are ordered by address so every block gets a
       * unique position in the tree. */
      if (blk1 < blk2)
      {
         return -1;
      }
      if (blk1 > blk2)
      {
         return 1;
      }
   }
   else
   {
      if (blk1->hdr.Size < blk2->hdr.Size)
      {
         return -1;
      }
      if (blk1->hdr.Size > blk2->hdr.Size)
      {
         return 1;
      }
   }
   return 0;
}
int compare_value(PVOID value, PNODE p)
{
   HDR_FREE* blk = CONTAINING_RECORD(p, HDR_FREE, Node);
   ULONG v = *(PULONG)value;

   if (v < blk->hdr.Size)
   {
      return -1;
   }
   if (v > blk->hdr.Size)
   {
      return 1;
   }
   return 0;
}
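
/*
 * How the two comparators are used (illustrative sketch only): compare_node
 * keys the free tree on hdr.Size, while compare_value lets a raw size be
 * compared against a tree node, so a lookup for a request of Size bytes plus
 * its header boils down to
 *
 *   ULONG wanted = Size + HDR_USED_SIZE;
 *   PNODE p = avl_find_equal_or_greater(FreeBlockListRoot, wanted, compare_value);
 *   HDR_FREE* blk = p ? CONTAINING_RECORD(p, HDR_FREE, Node) : NULL;
 *
 * which is the pattern get_block() follows further down.
 */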
/* avl functions **********************************************************/

/* FIXME:
 *   The avl functions should be moved into a separate file.
 */

/* avl_insert and avl_remove are based on libavl (a library for the manipulation of binary trees). */
void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
{
   PNODE y;     /* Top node to update balance factor, and parent. */
   PNODE p, q;  /* Iterator, and parent. */
   PNODE w;     /* New root of rebalanced subtree. */
   int dir = 0; /* Direction to descend. */

   n->link[0] = n->link[1] = n->parent = NULL;
   n->balance = 0;

   y = *root;
   for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
   {
      dir = compare(n, p) > 0;
      if (p->balance != 0)
      {
         y = p;
      }
   }

   n->parent = q;
   if (q != NULL)
   {
      q->link[dir] = n;
   }
   else
   {
      *root = n;
   }

   if (*root == n)
   {
      return;
   }

   for (p = n; p != y; p = q)
   {
      q = p->parent;
      dir = q->link[0] != p;
      if (dir == 0)
      {
         q->balance--;
      }
      else
      {
         q->balance++;
      }
   }

   if (y->balance == -2)
   {
      PNODE x = y->link[0];
      if (x->balance == -1)
      {
         w = x;
         y->link[0] = x->link[1];
         x->link[1] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
      else
      {
         ASSERT(x->balance == +1);
         w = x->link[1];
         x->link[1] = w->link[0];
         w->link[0] = x;
         y->link[0] = w->link[1];
         w->link[1] = y;
         if (w->balance == -1)
         {
            x->balance = 0;
            y->balance = +1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == +1| */
         {
            x->balance = -1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[1] != NULL)
         {
            x->link[1]->parent = x;
         }
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
   }
   else if (y->balance == +2)
   {
      PNODE x = y->link[1];
      if (x->balance == +1)
      {
         w = x;
         y->link[1] = x->link[0];
         x->link[0] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
      else
      {
         ASSERT(x->balance == -1);
         w = x->link[0];
         x->link[0] = w->link[1];
         w->link[1] = x;
         y->link[1] = w->link[0];
         w->link[0] = y;
         if (w->balance == +1)
         {
            x->balance = 0;
            y->balance = -1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == -1| */
         {
            x->balance = +1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[0] != NULL)
         {
            x->link[0]->parent = x;
         }
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
   }
   else
   {
      return;
   }

   if (w->parent != NULL)
   {
      w->parent->link[y != w->parent->link[0]] = w;
   }
   else
   {
      *root = w;
   }
}
void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
{
   PNODE p; /* Traverses tree to find node to delete. */
   PNODE q; /* Parent of |p|. */
   int dir; /* Side of |q| on which |p| is linked. */

   if (root == NULL || *root == NULL)
   {
      return;
   }

   p = item;
   q = p->parent;
   if (q == NULL)
   {
      q = (PNODE) root;
      dir = 0;
   }
   else
   {
      dir = compare(p, q) > 0;
   }

   if (p->link[1] == NULL)
   {
      q->link[dir] = p->link[0];
      if (q->link[dir] != NULL)
      {
         q->link[dir]->parent = p->parent;
      }
   }
   else
   {
      PNODE r = p->link[1];
      if (r->link[0] == NULL)
      {
         r->link[0] = p->link[0];
         q->link[dir] = r;
         r->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         r->balance = p->balance;
         q = r;
         dir = 1;
      }
      else
      {
         PNODE s = r->link[0];
         while (s->link[0] != NULL)
         {
            s = s->link[0];
         }
         r = s->parent;
         r->link[0] = s->link[1];
         s->link[0] = p->link[0];
         s->link[1] = p->link[1];
         q->link[dir] = s;
         if (s->link[0] != NULL)
         {
            s->link[0]->parent = s;
         }
         s->link[1]->parent = s;
         s->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         s->balance = p->balance;
         q = r;
         dir = 0;
      }
   }

   item->link[0] = item->link[1] = item->parent = NULL;

   while (q != (PNODE) root)
   {
      PNODE y = q;

      if (y->parent != NULL)
      {
         q = y->parent;
      }
      else
      {
         q = (PNODE) root;
      }

      if (dir == 0)
      {
         dir = q->link[0] != y;
         y->balance++;
         if (y->balance == +1)
         {
            break;
         }
         else if (y->balance == +2)
         {
            PNODE x = y->link[1];
            if (x->balance == -1)
            {
               PNODE w;
               ASSERT(x->balance == -1);
               w = x->link[0];
               x->link[0] = w->link[1];
               w->link[1] = x;
               y->link[1] = w->link[0];
               w->link[0] = y;
               if (w->balance == +1)
               {
                  x->balance = 0;
                  y->balance = -1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == -1| */
               {
                  x->balance = +1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[0] != NULL)
               {
                  x->link[0]->parent = x;
               }
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               y->link[1] = x->link[0];
               x->link[0] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  x->balance = -1;
                  y->balance = +1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
      else
      {
         dir = q->link[0] != y;
         y->balance--;
         if (y->balance == -1)
         {
            break;
         }
         else if (y->balance == -2)
         {
            PNODE x = y->link[0];
            if (x->balance == +1)
            {
               PNODE w;
               ASSERT(x->balance == +1);
               w = x->link[1];
               x->link[1] = w->link[0];
               w->link[0] = x;
               y->link[0] = w->link[1];
               w->link[1] = y;
               if (w->balance == -1)
               {
                  x->balance = 0;
                  y->balance = +1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == +1| */
               {
                  x->balance = -1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[1] != NULL)
               {
                  x->link[1]->parent = x;
               }
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               y->link[0] = x->link[1];
               x->link[1] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  x->balance = +1;
                  y->balance = -1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
   }
}
PNODE _cdecl avl_get_first(PNODE root)
{
   PNODE p;

   if (root == NULL)
   {
      return NULL;
   }
   p = root;
   while (p->link[0])
   {
      p = p->link[0];
   }
   return p;
}

PNODE avl_get_next(PNODE root, PNODE p)
{
   PNODE q;

   if (p->link[1])
   {
      p = p->link[1];
      while (p->link[0])
      {
         p = p->link[0];
      }
      return p;
   }
   else
   {
      q = p->parent;
      while (q && q->link[1] == p)
      {
         p = q;
         q = q->parent;
      }
      return q;
   }
}

PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
{
   PNODE p;
   PNODE prev = NULL;
   int cmp;

   for (p = root; p != NULL;)
   {
      cmp = compare((PVOID)&size, p);
      if (cmp < 0)
      {
         prev = p;
         p = p->link[0];
      }
      else if (cmp > 0)
      {
         p = p->link[1];
      }
      else
      {
         /* equal size found; walk left while the left child has the same size */
         while (p->link[0])
         {
            cmp = compare((PVOID)&size, p->link[0]);
            if (cmp != 0)
            {
               break;
            }
            p = p->link[0];
         }
         return p;
      }
   }
   return prev;
}
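
/*
 * How the pool uses this small AVL module: free blocks enter the tree with
 * avl_insert(&FreeBlockListRoot, &blk->Node, compare_node), leave it through
 * avl_remove(), and get_block() scans candidates in ascending size order via
 * avl_find_equal_or_greater() followed by avl_get_next(), which biases the
 * search towards the smallest block that can satisfy a request.
 */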
/* non paged pool functions ************************************************/

#ifdef TAG_STATISTICS_TRACKING
VOID
MiRemoveFromTagHashTable(HDR_USED* block)
/*
 * Remove a block from the tag hash table
 */
{
   if (block->Tag == 0)
   {
      return;
   }
   RemoveEntryList(&block->TagListEntry);
}

VOID
MiAddToTagHashTable(HDR_USED* block)
/*
 * Add a block to the tag hash table
 */
{
   ULONG hash;

   if (block->Tag == 0)
   {
      return;
   }
   hash = block->Tag % TAG_HASH_TABLE_SIZE;
   InsertHeadList(&tag_hash_table[hash], &block->TagListEntry);
}
#endif /* TAG_STATISTICS_TRACKING */
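
/*
 * Illustration (hypothetical tag, not one used by this file): a block
 * allocated with the four-character tag 'Npol' is filed under bucket
 * ('Npol' % TAG_HASH_TABLE_SIZE), and MiDebugDumpNonPagedPoolStats() below
 * reports per-tag block counts and sizes by walking these buckets.
 */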
#if defined(TAG_STATISTICS_TRACKING)
static VOID
MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
{
   CHAR c1, c2, c3, c4;

   c1 = (CHAR)((CurrentTag >> 24) & 0xFF);
   c2 = (CHAR)((CurrentTag >> 16) & 0xFF);
   c3 = (CHAR)((CurrentTag >> 8) & 0xFF);
   c4 = (CHAR)(CurrentTag & 0xFF);

   if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
   {
      DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
               CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
               CurrentSize, CurrentSize / CurrentNrBlocks);
   }
   else
   {
      DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
               CurrentTag, CurrentNrBlocks, CurrentSize,
               CurrentSize / CurrentNrBlocks);
   }
}
#endif /* defined(TAG_STATISTICS_TRACKING) */
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#if defined(TAG_STATISTICS_TRACKING)
   ULONG i;
   HDR_USED* current;
   ULONG CurrentTag;
   ULONG CurrentNrBlocks = 0;
   ULONG CurrentSize = 0;
   ULONG TotalBlocks;
   ULONG TotalSize;
   ULONG Size;
   LIST_ENTRY tmpListHead;
   PLIST_ENTRY current_entry;

   DbgPrint("******* Dumping non paging pool stats ******\n");
   TotalBlocks = 0;
   TotalSize = 0;
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tmpListHead);

      while (!IsListEmpty(&tag_hash_table[i]))
      {
         CurrentTag = 0;

         current_entry = tag_hash_table[i].Flink;
         while (current_entry != &tag_hash_table[i])
         {
            current = CONTAINING_RECORD(current_entry, HDR_USED, TagListEntry);
            current_entry = current_entry->Flink;
            if (CurrentTag == 0)
            {
               CurrentTag = current->Tag;
               CurrentNrBlocks = 0;
               CurrentSize = 0;
            }
            if (current->Tag == CurrentTag)
            {
               RemoveEntryList(&current->TagListEntry);
               InsertHeadList(&tmpListHead, &current->TagListEntry);
               if (!NewOnly || !current->Dumped)
               {
                  CurrentNrBlocks++;
                  TotalBlocks++;
                  CurrentSize += current->hdr.Size;
                  TotalSize += current->hdr.Size;
                  current->Dumped = TRUE;
               }
            }
         }
         if (CurrentTag != 0 && CurrentNrBlocks != 0)
         {
            MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
         }
      }
      if (!IsListEmpty(&tmpListHead))
      {
         tag_hash_table[i].Flink = tmpListHead.Flink;
         tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
         tag_hash_table[i].Blink = tmpListHead.Blink;
         tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
      }
   }
   if (TotalBlocks != 0)
   {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
   }
   else
   {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
   }
   Size = EiFreeNonPagedPool - (MiNonPagedPoolLength - MiNonPagedPoolNrOfPages * PAGE_SIZE);
   DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
            EiNrFreeBlocks, Size, EiNrFreeBlocks ? Size / EiNrFreeBlocks : 0);
   DbgPrint("***************** Dump Complete ***************\n");
#endif /* defined(TAG_STATISTICS_TRACKING) */
}
VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
#if defined(POOL_DEBUG_APIS)
   HDR_USED* current;
   PLIST_ENTRY current_entry;
   KIRQL oldIrql;
   CHAR c1, c2, c3, c4;

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   DbgPrint("******* Dumping non paging pool contents ******\n");
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
      if (!NewOnly || !current->Dumped)
      {
         c1 = (CHAR)((current->Tag >> 24) & 0xFF);
         c2 = (CHAR)((current->Tag >> 16) & 0xFF);
         c3 = (CHAR)((current->Tag >> 8) & 0xFF);
         c4 = (CHAR)(current->Tag & 0xFF);

         if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
         {
            DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                     current->hdr.Size, current->Tag, c4, c3, c2, c1,
                     current->Caller);
         }
         else
         {
            DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                     current->hdr.Size, current->Tag, current->Caller);
         }
         current->Dumped = TRUE;
      }
      current_entry = current_entry->Flink;
   }
   DbgPrint("***************** Dump Complete ***************\n");
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#endif /* defined(POOL_DEBUG_APIS) */
}
#ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 */
{
   HDR_FREE* current;
   PVOID base_addr;
   PNODE p;
   unsigned int blocks_seen = 0;

   p = avl_get_first(FreeBlockListRoot);

   while (p)
   {
      current = CONTAINING_RECORD(p, HDR_FREE, Node);
      base_addr = (PVOID)current;

      if (current->hdr.Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      if (base_addr < MiNonPagedPoolStart ||
          (ULONG_PTR)base_addr + current->hdr.Size > (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n", current);
         DbgPrint("Size %d\n", current->hdr.Size);
         DbgPrint("Limits are %x %x\n", MiNonPagedPoolStart,
                  (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      if (blocks_seen > EiNrFreeBlocks)
      {
         DbgPrint("Too many blocks on free list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }
}
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 */
{
   HDR_USED* current;
   PVOID base_addr;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen = 0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
      base_addr = (PVOID)current;

      if (current->hdr.Magic != BLOCK_HDR_USED_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if (base_addr < MiNonPagedPoolStart ||
          ((ULONG_PTR)base_addr + current->hdr.Size) >
          (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n", current);
         DbgPrint("Size %d\n", current->hdr.Size);
         DbgPrint("Limits are %x %x\n", MiNonPagedPoolStart,
                  (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
      {
         DbgPrint("Too many blocks on used list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if (current->ListEntry.Flink != &UsedBlockListHead &&
          current->ListEntry.Flink->Blink != &current->ListEntry)
      {
         DbgPrint("%s:%d:Break in list (current %x next %x "
                  "current->next->previous %x)\n",
                  __FILE__, __LINE__, current, current->ListEntry.Flink,
                  current->ListEntry.Flink->Blink);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }
}
static void check_duplicates(HDR* blk)
/*
 * FUNCTION: Check that a block has no duplicates
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if duplicates are found
 */
{
   ULONG_PTR base = (ULONG_PTR)blk;
   ULONG_PTR last = (ULONG_PTR)blk + blk->Size;
   PLIST_ENTRY current_entry;
   PNODE p;
   HDR_FREE* free;
   HDR_USED* used;

   p = avl_get_first(FreeBlockListRoot);

   while (p)
   {
      free = CONTAINING_RECORD(p, HDR_FREE, Node);
      if (free->hdr.Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  free);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      if ((ULONG_PTR)free > base && (ULONG_PTR)free < last)
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if ((ULONG_PTR)free < base &&
          ((ULONG_PTR)free + free->hdr.Size) > base)
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      used = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);

      if ((ULONG_PTR)used > base && (ULONG_PTR)used < last)
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if ((ULONG_PTR)used < base &&
          ((ULONG_PTR)used + used->hdr.Size) > base)
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }
}
static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 */
{
   HDR_FREE* free;
   HDR_USED* used;
   PLIST_ENTRY current_entry;
   PNODE p;

   validate_free_list();
   validate_used_list();

   p = avl_get_first(FreeBlockListRoot);
   while (p)
   {
      free = CONTAINING_RECORD(p, HDR_FREE, Node);
      check_duplicates(&free->hdr);
      p = avl_get_next(FreeBlockListRoot, p);
   }
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      used = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
      check_duplicates(&used->hdr);
      current_entry = current_entry->Flink;
   }
}
#endif /* ENABLE_VALIDATE_POOL */
static void
free_pages(HDR_FREE* blk)
{
   ULONG_PTR start;
   ULONG_PTR end;

   start = (ULONG_PTR)blk;
   end = (ULONG_PTR)blk + blk->hdr.Size;

   /*
    * If the block doesn't contain a whole page then there is nothing to do
    */
   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
   {
      return;
   }
}
static void remove_from_used_list(HDR_USED* current)
{
   RemoveEntryList(&current->ListEntry);
   EiUsedNonPagedPool -= current->hdr.Size;
   EiNrUsedBlocks--;
}
static void remove_from_free_list(HDR_FREE* current)
{
   DPRINT("remove_from_free_list %d\n", current->hdr.Size);

   avl_remove(&FreeBlockListRoot, &current->Node, compare_node);

   EiFreeNonPagedPool -= current->hdr.Size;
   EiNrFreeBlocks--;
   DPRINT("remove_from_free_list done\n");
#if 0
   DumpFreeBlockTree();
#endif
}
static void
add_to_free_list(HDR_FREE* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 */
{
   HDR_FREE* current;
   BOOL UpdatePrevPtr = FALSE;

   DPRINT("add_to_free_list (%d)\n", blk->hdr.Size);

   EiNrFreeBlocks++;

   /* merge with the preceding block if it is also free */
   current = (HDR_FREE*)blk->hdr.previous;
   if (current && current->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      current->hdr.Size = current->hdr.Size + blk->hdr.Size;
      current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
      memset(blk, 0xcc, HDR_USED_SIZE);
      blk = current;
      UpdatePrevPtr = TRUE;
   }

   /* merge with the following block if it is also free */
   current = (HDR_FREE*)((ULONG_PTR)blk + blk->hdr.Size);
   if ((char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength &&
       current->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      blk->hdr.Size += current->hdr.Size;
      memset(current, 0xcc, HDR_FREE_SIZE);
      UpdatePrevPtr = TRUE;
      current = (HDR_FREE*)((ULONG_PTR)blk + blk->hdr.Size);
   }
   if (UpdatePrevPtr &&
       (char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
   {
      current->hdr.previous = &blk->hdr;
   }
   DPRINT("%d\n", blk->hdr.Size);
   blk->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   EiFreeNonPagedPool += blk->hdr.Size;
   avl_insert(&FreeBlockListRoot, &blk->Node, compare_node);
   DPRINT("add_to_free_list done\n");
#if 0
   DumpFreeBlockTree();
#endif
}
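
/*
 * Coalescing summary for the function above: when a block is freed and a
 * physical neighbour (the previous block or the one immediately after) is
 * already free, that neighbour is pulled off the AVL tree by
 * remove_from_free_list(), its size is folded into the freed block, and the
 * single merged HDR_FREE re-enters the tree, which keeps the free tree from
 * filling up with tiny fragments.
 */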
static void add_to_used_list(HDR_USED* blk)
/*
 * FUNCTION: add the block to the used list (internal)
 */
{
   InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
   EiUsedNonPagedPool += blk->hdr.Size;
   EiNrUsedBlocks++;
}
static BOOLEAN
grow_block(HDR_FREE* blk, PVOID end)
{
   NTSTATUS Status;
   PFN_TYPE Page[32];
   ULONG_PTR StartIndex, EndIndex;
   ULONG i, j, k;

   StartIndex = (ULONG_PTR)(PAGE_ROUND_UP((ULONG_PTR)blk + HDR_FREE_SIZE - (ULONG_PTR)MiNonPagedPoolStart)) / PAGE_SIZE;
   EndIndex = ((ULONG_PTR)PAGE_ROUND_UP(end) - (ULONG_PTR)MiNonPagedPoolStart) / PAGE_SIZE;

   for (i = StartIndex; i < EndIndex; i++)
   {
      if (!(MiNonPagedPoolAllocMap[i / 32] & (1 << (i % 32))))
      {
         /* find a run of at most 32 not yet committed pages */
         for (j = i + 1; j < EndIndex && j - i < 32; j++)
         {
            if (MiNonPagedPoolAllocMap[j / 32] & (1 << (j % 32)))
            {
               break;
            }
         }
         for (k = 0; k < j - i; k++)
         {
            Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page[k]);
            if (!NT_SUCCESS(Status))
            {
               for (i = 0; i < k; i++)
               {
                  MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
               }
               return FALSE;
            }
         }
         Status = MmCreateVirtualMapping(NULL,
                                         (PVOID)((ULONG_PTR)MiNonPagedPoolStart + i * PAGE_SIZE),
                                         PAGE_READWRITE|PAGE_SYSTEM,
                                         Page,
                                         k);
         if (!NT_SUCCESS(Status))
         {
            for (i = 0; i < k; i++)
            {
               MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
            }
            return FALSE;
         }
         for (j = i; j < k + i; j++)
         {
            MiNonPagedPoolAllocMap[j / 32] |= (1 << (j % 32));
         }
         MiNonPagedPoolNrOfPages += k;
      }
   }
   return TRUE;
}
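
/*
 * The allocation bitmap used above tracks one bit per pool page: page index
 * i is committed iff (MiNonPagedPoolAllocMap[i / 32] & (1 << (i % 32))) is
 * set, so, for example, page 40 of the pool lives in bit 8 of map word 1.
 * grow_block() only maps those pages of a block whose bits are still clear.
 */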
static HDR_USED* get_block(unsigned int size, unsigned long alignment)
{
   HDR_FREE *blk, *current, *previous = NULL, *next = NULL, *best = NULL;
   ULONG previous_size = 0, current_size, next_size = 0, new_size;
   PVOID end;
   PVOID addr, aligned_addr, best_aligned_addr = NULL;
   PNODE p;

   DPRINT("get_block %d\n", size);

   p = avl_find_equal_or_greater(FreeBlockListRoot, size + HDR_USED_SIZE, compare_value);
   while (p)
   {
      current = CONTAINING_RECORD(p, HDR_FREE, Node);
      addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
      /* calculate first aligned address available within this block */
      aligned_addr = alignment > 0 ? MM_ROUND_UP(addr, alignment) : addr;
      if (size < PAGE_SIZE)
      {
         /* check that the block is in one page */
         if (PAGE_ROUND_DOWN(aligned_addr) != PAGE_ROUND_DOWN((ULONG_PTR)aligned_addr + size - 1))
         {
            aligned_addr = (PVOID)PAGE_ROUND_UP(aligned_addr);
         }
      }
      DPRINT("%x %x\n", addr, aligned_addr);
      if (aligned_addr != addr)
      {
         /* leave room for a free block header in front of the aligned address */
         while ((ULONG_PTR)aligned_addr - (ULONG_PTR)addr < HDR_FREE_SIZE)
         {
            if (alignment == 0)
            {
               aligned_addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE + HDR_FREE_SIZE);
            }
            else
            {
               aligned_addr = MM_ROUND_UP((PVOID)((ULONG_PTR)current + HDR_USED_SIZE + HDR_FREE_SIZE), alignment);
            }
            if (size < PAGE_SIZE)
            {
               /* check that the block is in one page */
               if (PAGE_ROUND_DOWN(aligned_addr) != PAGE_ROUND_DOWN((ULONG_PTR)aligned_addr + size - 1))
               {
                  aligned_addr = (PVOID)PAGE_ROUND_UP(aligned_addr);
               }
            }
         }
      }
      DPRINT("%x %x\n", addr, aligned_addr);
      new_size = (ULONG_PTR)aligned_addr - (ULONG_PTR)addr + size;
      if (current->hdr.Size >= new_size + HDR_USED_SIZE &&
          (best == NULL || current->hdr.Size < best->hdr.Size))
      {
         best = current;
         best_aligned_addr = aligned_addr;
         if (new_size <= size + 2 * HDR_FREE_SIZE)
         {
            break;
         }
      }

      if (best)
      {
         if (size < PAGE_SIZE)
         {
            if (current->hdr.Size >= 2 * PAGE_SIZE + HDR_FREE_SIZE)
            {
               break;
            }
         }
         else
         {
            if (current->hdr.Size >= size + alignment + HDR_FREE_SIZE)
            {
               break;
            }
         }
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }

   /*
    * We didn't find anything suitable at all.
    */
   if (best == NULL)
   {
      return NULL;
   }

   DPRINT(":: blk %x blk->hdr.Size %d (%d) Size %d\n", best, best->hdr.Size, best->hdr.Size - HDR_USED_SIZE, size);

   current = best;
   current_size = current->hdr.Size - HDR_USED_SIZE;
   addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
   if (addr != best_aligned_addr)
   {
      blk = (HDR_FREE*)((ULONG_PTR)best_aligned_addr - HDR_USED_SIZE);
      /*
       * the aligned address lies further into the block; break off the
       * preceding bytes into their own free block
       */
      previous = current;
      previous_size = (ULONG_PTR)blk - (ULONG_PTR)previous - HDR_FREE_SIZE;
      current = blk;
      current_size -= ((ULONG_PTR)current - (ULONG_PTR)previous);
   }
   end = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE + size);

   if (current_size >= size + HDR_FREE_SIZE + MM_POOL_ALIGNMENT)
   {
      /* create a new free block after our block, if at least MM_POOL_ALIGNMENT
       * bytes of payload are left for it */
      next = (HDR_FREE*)((ULONG_PTR)current + size + HDR_USED_SIZE);
      next_size = current_size - size - HDR_FREE_SIZE;
      current_size = size;
      end = (PVOID)((ULONG_PTR)next + HDR_FREE_SIZE);
   }

   if (previous)
   {
      remove_from_free_list(previous);
      if (!grow_block(previous, end))
      {
         add_to_free_list(previous);
         return NULL;
      }
      memset(current, 0, HDR_USED_SIZE);
      current->hdr.Size = current_size + HDR_USED_SIZE;
      current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
      current->hdr.previous = &previous->hdr;
      previous->hdr.Size = previous_size + HDR_FREE_SIZE;
      if (next == NULL)
      {
         blk = (HDR_FREE*)((ULONG_PTR)current + current->hdr.Size);
         if ((ULONG_PTR)blk < (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
         {
            blk->hdr.previous = &current->hdr;
         }
      }
      add_to_free_list(previous);
   }
   else
   {
      remove_from_free_list(current);
      if (!grow_block(current, end))
      {
         add_to_free_list(current);
         return NULL;
      }
      current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
      if (next)
      {
         current->hdr.Size = current_size + HDR_USED_SIZE;
      }
   }

   if (next)
   {
      memset(next, 0, HDR_FREE_SIZE);
      next->hdr.Size = next_size + HDR_FREE_SIZE;
      next->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
      next->hdr.previous = &current->hdr;
      blk = (HDR_FREE*)((ULONG_PTR)next + next->hdr.Size);
      if ((ULONG_PTR)blk < (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
      {
         blk->hdr.previous = &next->hdr;
      }
      add_to_free_list(next);
   }

   add_to_used_list((HDR_USED*)current);
   VALIDATE_POOL;

   if (size < PAGE_SIZE)
   {
      addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
      if (PAGE_ROUND_DOWN(addr) != PAGE_ROUND_DOWN((PVOID)((ULONG_PTR)addr + size - 1)))
      {
         DPRINT1("%x %x\n", addr, (ULONG_PTR)addr + size);
      }
      ASSERT (PAGE_ROUND_DOWN(addr) == PAGE_ROUND_DOWN((PVOID)((ULONG_PTR)addr + size - 1)));
   }
   if (alignment > 0)
   {
      addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
      ASSERT(MM_ROUND_UP(addr, alignment) == addr);
   }
   return (HDR_USED*)current;
}
ULONG STDCALL
ExRosQueryNonPagedPoolTag ( PVOID Addr )
{
   HDR_USED* blk = (HDR_USED*)((ULONG_PTR)Addr - HDR_USED_SIZE);
   if (blk->hdr.Magic != BLOCK_HDR_USED_MAGIC)
   {
      KEBUGCHECK(0);
   }
   return blk->Tag;
}
VOID STDCALL
ExFreeNonPagedPool (PVOID block)
/*
 * FUNCTION: Releases previously allocated memory
 * ARGUMENTS:
 *         block = block to free
 */
{
   HDR_USED* blk = (HDR_USED*)((ULONG_PTR)block - HDR_USED_SIZE);
   KIRQL oldIrql;

   if (block == NULL)
   {
      return;
   }

   DPRINT("freeing block %x\n", blk);

   POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n", block, blk->hdr.Size,
              blk->Caller);
   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   VALIDATE_POOL;

   if (blk->hdr.Magic != BLOCK_HDR_USED_MAGIC)
   {
      if (blk->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("ExFreePool of already freed address %x\n", block);
      }
      else
      {
         DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
                  block, blk->hdr.Magic);
      }
      KEBUGCHECK(0);
      return;
   }

   memset(block, 0xcc, blk->hdr.Size - HDR_USED_SIZE);
#ifdef TAG_STATISTICS_TRACKING
   MiRemoveFromTagHashTable(blk);
#endif
   remove_from_used_list(blk);
   blk->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   add_to_free_list((HDR_FREE*)blk);
   VALIDATE_POOL;
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}
PVOID STDCALL
ExAllocateNonPagedPoolWithTag(POOL_TYPE Type, ULONG Size, ULONG Tag, PVOID Caller)
{
   PVOID block;
   HDR_USED* best = NULL;
   KIRQL oldIrql;
   ULONG alignment;

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size, Caller);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   /* after some allocations print the npaged pool stats */
#ifdef TAG_STATISTICS_TRACKING
   {
      static ULONG counter = 0;
      if (counter++ % 100000 == 0)
      {
         MiDebugDumpNonPagedPoolStats(FALSE);
      }
   }
#endif
   /*
    * accommodate this useful idiom: a zero-byte request simply returns NULL
    */
   if (Size == 0)
   {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
   }
   /* Make the size dword aligned, this makes the block dword aligned */
   Size = ROUND_UP(Size, MM_POOL_ALIGNMENT);

   if (Size >= PAGE_SIZE)
   {
      alignment = PAGE_SIZE;
   }
   else if (Type & CACHE_ALIGNED_POOL_MASK)
   {
      alignment = MM_CACHE_LINE_SIZE;
   }
   else
   {
      alignment = 0;
   }

   best = get_block(Size, alignment);
   if (best == NULL)
   {
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
              Size);
      return NULL;
   }
   best->Tag = Tag;
   best->Caller = Caller;
   best->Dumped = FALSE;
   best->TagListEntry.Flink = best->TagListEntry.Blink = NULL;
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(best);
#endif
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   block = (PVOID)((ULONG_PTR)best + HDR_USED_SIZE);
   /* RtlZeroMemory(block, Size);*/
   return(block);
}
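
/*
 * Typical call pattern for the pair above (the values are only an example):
 *
 *   PVOID buf = ExAllocateNonPagedPoolWithTag(NonPagedPool, 128, 0x6c6f704e, NULL);
 *   if (buf != NULL)
 *   {
 *      ... use the 128 bytes ...
 *      ExFreeNonPagedPool(buf);
 *   }
 *
 * ExFreeNonPagedPool() steps back HDR_USED_SIZE bytes to recover the
 * HDR_USED header, checks its magic and returns the block to the free tree.
 */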
VOID
MiInitializeNonPagedPool(VOID)
{
   NTSTATUS Status;
   PFN_TYPE Page;
   ULONG i;
   PVOID Address;
   HDR_USED* used;
   HDR_FREE* free;

#ifdef TAG_STATISTICS_TRACKING
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tag_hash_table[i]);
   }
#endif
   KeInitializeSpinLock(&MmNpoolLock);
   InitializeListHead(&UsedBlockListHead);
   InitializeListHead(&AddressListHead);
   FreeBlockListRoot = NULL;

   MiNonPagedPoolAllocMap = (PVOID)((ULONG_PTR)MiNonPagedPoolStart + PAGE_SIZE);
   MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + HDR_FREE_SIZE) + PAGE_SIZE;
   MiNonPagedPoolNrOfPages /= PAGE_SIZE;
   Address = MiNonPagedPoolStart;

   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to allocate a page\n");
         KEBUGCHECK(0);
      }

      Status = MmCreateVirtualMapping(NULL,
                                      Address,
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      &Page,
                                      1);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
      Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
   }

   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      MiNonPagedPoolAllocMap[i / 32] |= (1 << (i % 32));
   }

   /* the first block is free */
   free = (HDR_FREE*)MiNonPagedPoolStart;
   free->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   free->hdr.Size = PAGE_SIZE - HDR_USED_SIZE;
   free->hdr.previous = NULL;
   memset((PVOID)((ULONG_PTR)free + HDR_FREE_SIZE), 0xcc, free->hdr.Size - HDR_FREE_SIZE);
   add_to_free_list(free);

   /* the second block contains the non paged pool bitmap */
   used = (HDR_USED*)((ULONG_PTR)free + free->hdr.Size);
   used->hdr.Magic = BLOCK_HDR_USED_MAGIC;
   used->hdr.Size = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + HDR_USED_SIZE;
   used->hdr.previous = &free->hdr;
   used->Tag = 0xffffffff;
   used->Caller = NULL;
   used->Dumped = FALSE;
   add_to_used_list(used);
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(used);
#endif

   /* the third block is the free block after the bitmap */
   free = (HDR_FREE*)((ULONG_PTR)used + used->hdr.Size);
   free->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   free->hdr.Size = MiNonPagedPoolLength - ((ULONG_PTR)free - (ULONG_PTR)MiNonPagedPoolStart);
   free->hdr.previous = &used->hdr;
   memset((PVOID)((ULONG_PTR)free + HDR_FREE_SIZE), 0xcc, (ULONG_PTR)Address - (ULONG_PTR)free - HDR_FREE_SIZE);
   add_to_free_list(free);
}
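
/*
 * Resulting initial layout, as built by the function above: a small free
 * block at the very start of the pool, then the permanently used block that
 * holds MiNonPagedPoolAllocMap (tagged 0xffffffff), then one large free block
 * covering the remainder of MiNonPagedPoolLength. Only the first
 * MiNonPagedPoolNrOfPages pages are mapped up front; grow_block() commits the
 * rest on demand as blocks are handed out.
 */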
PVOID STDCALL
MiAllocateSpecialPool (IN POOL_TYPE PoolType,
                       IN SIZE_T NumberOfBytes,
                       IN ULONG Tag,
                       IN ULONG Underrun)
{
    /* FIXME: Special Pools not Supported */
    DbgPrint("Special Pools not supported\n");
    return NULL;
}