3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 * Iwan Fatahi (i_fatahi@hotmail.com)
10 * Robert Bergkvist (fragdance@hotmail.com)
14 /* INCLUDES ****************************************************************/
18 #include <internal/debug.h>
20 #ifdef ENABLE_VALIDATE_POOL
21 #define VALIDATE_POOL validate_kernel_pool()
27 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
30 #define POOL_TRACE(args...)
36 /* avl types ****************************************************************/
39 * These declarations should be moved into a separate header file.
44 struct _NODE
* link
[2];
50 /* TYPES *******************************************************************/
52 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
53 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
56 * fields present at the start of a block (this is for internal use only)
62 struct _HDR
* previous
;
65 typedef struct _HDR_USED
71 LIST_ENTRY TagListEntry
;
73 } HDR_USED
, *PHDR_USED
;
75 typedef struct _HDR_FREE
79 } HDR_FREE
, *PHDR_FREE
;
81 #define HDR_FREE_SIZE ROUND_UP(sizeof(HDR_FREE), MM_POOL_ALIGNMENT)
82 #define HDR_USED_SIZE ROUND_UP(sizeof(HDR_USED), MM_POOL_ALIGNMENT)
85 /* GLOBALS *****************************************************************/
87 extern PVOID MiNonPagedPoolStart
;
88 extern ULONG MiNonPagedPoolLength
;
91 * Head of the list of free blocks
93 static PNODE FreeBlockListRoot
= NULL
;
96 * Head of the list of in-use blocks
98 static LIST_ENTRY UsedBlockListHead
;
100 static LIST_ENTRY AddressListHead
;
103 * Count of free blocks
105 static ULONG EiNrFreeBlocks
= 0;
108 * Count of used blocks
110 static ULONG EiNrUsedBlocks
= 0;
113 * Lock that protects the non-paged pool data structures
115 static KSPIN_LOCK MmNpoolLock
;
118 * Total memory used for free nonpaged pool blocks
120 ULONG EiFreeNonPagedPool
= 0;
123 * Total memory used for nonpaged pool blocks
125 ULONG EiUsedNonPagedPool
= 0;
127 /* Total quota for Non Paged Pool */
128 ULONG MmTotalNonPagedPoolQuota
= 0;
131 * Allocate a range of memory in the nonpaged pool
134 MiAllocNonPagedPoolRegion(unsigned int nr_pages
);
137 MiFreeNonPagedPoolRegion(PVOID Addr
, ULONG Count
, BOOLEAN Free
);
139 #ifdef TAG_STATISTICS_TRACKING
140 #define TAG_HASH_TABLE_SIZE (1024)
141 static LIST_ENTRY tag_hash_table
[TAG_HASH_TABLE_SIZE
];
142 #endif /* TAG_STATISTICS_TRACKING */
144 static PULONG MiNonPagedPoolAllocMap
;
145 static ULONG MiNonPagedPoolNrOfPages
;
147 /* avl helper functions ****************************************************/
149 void DumpFreeBlockNode(PNODE p
)
151 static int count
= 0;
158 DumpFreeBlockNode(p
->link
[0]);
159 blk
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
160 DbgPrint("%08x %8d (%d)\n", blk
, blk
->hdr
.Size
, count
);
161 DumpFreeBlockNode(p
->link
[1]);
166 void DumpFreeBlockTree(void)
168 DbgPrint("--- Begin tree ------------------\n");
169 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot
, HDR_FREE
, Node
));
170 DumpFreeBlockNode(FreeBlockListRoot
);
171 DbgPrint("--- End tree --------------------\n");
174 int compare_node(PNODE p1
, PNODE p2
)
176 HDR_FREE
* blk1
= CONTAINING_RECORD(p1
, HDR_FREE
, Node
);
177 HDR_FREE
* blk2
= CONTAINING_RECORD(p2
, HDR_FREE
, Node
);
179 if (blk1
->hdr
.Size
== blk2
->hdr
.Size
)
192 if (blk1
->hdr
.Size
< blk2
->hdr
.Size
)
196 if (blk1
->hdr
.Size
> blk2
->hdr
.Size
)
205 int compare_value(PVOID value
, PNODE p
)
207 HDR_FREE
* blk
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
208 ULONG v
= *(PULONG
)value
;
210 if (v
< blk
->hdr
.Size
)
214 if (v
> blk
->hdr
.Size
)
221 /* avl functions **********************************************************/
224 * The avl functions should be moved into a separate file.
227 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
229 void avl_insert (PNODE
* root
, PNODE n
, int (*compare
)(PNODE
, PNODE
))
231 PNODE y
; /* Top node to update balance factor, and parent. */
232 PNODE p
, q
; /* Iterator, and parent. */
233 PNODE w
; /* New root of rebalanced subtree. */
234 int dir
= 0; /* Direction to descend. */
236 n
->link
[0] = n
->link
[1] = n
->parent
= NULL
;
240 for (q
= NULL
, p
= *root
; p
!= NULL
; q
= p
, p
= p
->link
[dir
])
242 dir
= compare(n
, p
) > 0;
264 for (p
= n
; p
!= y
; p
= q
)
267 dir
= q
->link
[0] != p
;
278 if (y
->balance
== -2)
280 PNODE x
= y
->link
[0];
281 if (x
->balance
== -1)
284 y
->link
[0] = x
->link
[1];
286 x
->balance
= y
->balance
= 0;
287 x
->parent
= y
->parent
;
289 if (y
->link
[0] != NULL
)
291 y
->link
[0]->parent
= y
;
296 ASSERT(x
->balance
== +1);
298 x
->link
[1] = w
->link
[0];
300 y
->link
[0] = w
->link
[1];
302 if (w
->balance
== -1)
307 else if (w
->balance
== 0)
309 x
->balance
= y
->balance
= 0;
311 else /* |w->pavl_balance == +1| */
317 w
->parent
= y
->parent
;
318 x
->parent
= y
->parent
= w
;
319 if (x
->link
[1] != NULL
)
321 x
->link
[1]->parent
= x
;
323 if (y
->link
[0] != NULL
)
325 y
->link
[0]->parent
= y
;
329 else if (y
->balance
== +2)
331 PNODE x
= y
->link
[1];
332 if (x
->balance
== +1)
335 y
->link
[1] = x
->link
[0];
337 x
->balance
= y
->balance
= 0;
338 x
->parent
= y
->parent
;
340 if (y
->link
[1] != NULL
)
342 y
->link
[1]->parent
= y
;
347 ASSERT(x
->balance
== -1);
349 x
->link
[0] = w
->link
[1];
351 y
->link
[1] = w
->link
[0];
358 else if (w
->balance
== 0)
360 x
->balance
= y
->balance
= 0;
362 else /* |w->pavl_balance == -1| */
368 w
->parent
= y
->parent
;
369 x
->parent
= y
->parent
= w
;
370 if (x
->link
[0] != NULL
)
372 x
->link
[0]->parent
= x
;
374 if (y
->link
[1] != NULL
)
376 y
->link
[1]->parent
= y
;
384 if (w
->parent
!= NULL
)
386 w
->parent
->link
[y
!= w
->parent
->link
[0]] = w
;
396 void avl_remove (PNODE
*root
, PNODE item
, int (*compare
)(PNODE
, PNODE
))
398 PNODE p
; /* Traverses tree to find node to delete. */
399 PNODE q
; /* Parent of |p|. */
400 int dir
; /* Side of |q| on which |p| is linked. */
402 if (root
== NULL
|| *root
== NULL
)
416 dir
= compare(p
, q
) > 0;
419 if (p
->link
[1] == NULL
)
421 q
->link
[dir
] = p
->link
[0];
422 if (q
->link
[dir
] != NULL
)
424 q
->link
[dir
]->parent
= p
->parent
;
429 PNODE r
= p
->link
[1];
430 if (r
->link
[0] == NULL
)
432 r
->link
[0] = p
->link
[0];
434 r
->parent
= p
->parent
;
435 if (r
->link
[0] != NULL
)
437 r
->link
[0]->parent
= r
;
439 r
->balance
= p
->balance
;
445 PNODE s
= r
->link
[0];
446 while (s
->link
[0] != NULL
)
451 r
->link
[0] = s
->link
[1];
452 s
->link
[0] = p
->link
[0];
453 s
->link
[1] = p
->link
[1];
455 if (s
->link
[0] != NULL
)
457 s
->link
[0]->parent
= s
;
459 s
->link
[1]->parent
= s
;
460 s
->parent
= p
->parent
;
461 if (r
->link
[0] != NULL
)
463 r
->link
[0]->parent
= r
;
465 s
->balance
= p
->balance
;
471 item
->link
[0] = item
->link
[1] = item
->parent
= NULL
;
474 while (q
!= (PNODE
) root
)
478 if (y
->parent
!= NULL
)
489 dir
= q
->link
[0] != y
;
491 if (y
->balance
== +1)
495 else if (y
->balance
== +2)
497 PNODE x
= y
->link
[1];
498 if (x
->balance
== -1)
502 ASSERT(x
->balance
== -1);
504 x
->link
[0] = w
->link
[1];
506 y
->link
[1] = w
->link
[0];
508 if (w
->balance
== +1)
513 else if (w
->balance
== 0)
515 x
->balance
= y
->balance
= 0;
517 else /* |w->pavl_balance == -1| */
523 w
->parent
= y
->parent
;
524 x
->parent
= y
->parent
= w
;
525 if (x
->link
[0] != NULL
)
527 x
->link
[0]->parent
= x
;
529 if (y
->link
[1] != NULL
)
531 y
->link
[1]->parent
= y
;
537 y
->link
[1] = x
->link
[0];
539 x
->parent
= y
->parent
;
541 if (y
->link
[1] != NULL
)
543 y
->link
[1]->parent
= y
;
554 x
->balance
= y
->balance
= 0;
562 dir
= q
->link
[0] != y
;
564 if (y
->balance
== -1)
568 else if (y
->balance
== -2)
570 PNODE x
= y
->link
[0];
571 if (x
->balance
== +1)
574 ASSERT(x
->balance
== +1);
576 x
->link
[1] = w
->link
[0];
578 y
->link
[0] = w
->link
[1];
580 if (w
->balance
== -1)
585 else if (w
->balance
== 0)
587 x
->balance
= y
->balance
= 0;
589 else /* |w->pavl_balance == +1| */
595 w
->parent
= y
->parent
;
596 x
->parent
= y
->parent
= w
;
597 if (x
->link
[1] != NULL
)
599 x
->link
[1]->parent
= x
;
601 if (y
->link
[0] != NULL
)
603 y
->link
[0]->parent
= y
;
609 y
->link
[0] = x
->link
[1];
611 x
->parent
= y
->parent
;
613 if (y
->link
[0] != NULL
)
615 y
->link
[0]->parent
= y
;
626 x
->balance
= y
->balance
= 0;
636 PNODE _cdecl
avl_get_first(PNODE root
)
651 PNODE
avl_get_next(PNODE root
, PNODE p
)
666 while (q
&& q
->link
[1] == p
)
682 PNODE
avl_find_equal_or_greater(PNODE root
, ULONG size
, int (compare
)(PVOID
, PNODE
))
688 for (p
= root
; p
!= NULL
;)
690 cmp
= compare((PVOID
)&size
, p
);
704 cmp
= compare((PVOID
)&size
, p
->link
[0]);
717 /* non paged pool functions ************************************************/
719 #ifdef TAG_STATISTICS_TRACKING
721 MiRemoveFromTagHashTable(HDR_USED
* block
)
723 * Remove a block from the tag hash table
730 RemoveEntryList(&block
->TagListEntry
);
734 MiAddToTagHashTable(HDR_USED
* block
)
736 * Add a block to the tag hash table
746 hash
= block
->Tag
% TAG_HASH_TABLE_SIZE
;
748 InsertHeadList(&tag_hash_table
[hash
], &block
->TagListEntry
);
750 #endif /* TAG_STATISTICS_TRACKING */
752 #if defined(TAG_STATISTICS_TRACKING)
754 MiDumpTagStats(ULONG CurrentTag
, ULONG CurrentNrBlocks
, ULONG CurrentSize
)
758 c1
= (CHAR
)((CurrentTag
>> 24) & 0xFF);
759 c2
= (CHAR
)((CurrentTag
>> 16) & 0xFF);
760 c3
= (CHAR
)((CurrentTag
>> 8) & 0xFF);
761 c4
= (CHAR
)(CurrentTag
& 0xFF);
763 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
765 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
766 CurrentTag
, c4
, c3
, c2
, c1
, CurrentNrBlocks
,
767 CurrentSize
, CurrentSize
/ CurrentNrBlocks
);
771 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
772 CurrentTag
, CurrentNrBlocks
, CurrentSize
,
773 CurrentSize
/ CurrentNrBlocks
);
776 #endif /* defined(TAG_STATISTICS_TRACKING) */
780 MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly
)
782 #if defined(TAG_STATISTICS_TRACKING)
786 ULONG CurrentNrBlocks
= 0;
787 ULONG CurrentSize
= 0;
791 LIST_ENTRY tmpListHead
;
792 PLIST_ENTRY current_entry
;
794 DbgPrint("******* Dumping non paging pool stats ******\n");
797 for (i
= 0; i
< TAG_HASH_TABLE_SIZE
; i
++)
799 InitializeListHead(&tmpListHead
);
801 while (!IsListEmpty(&tag_hash_table
[i
]))
805 current_entry
= tag_hash_table
[i
].Flink
;
806 while (current_entry
!= &tag_hash_table
[i
])
808 current
= CONTAINING_RECORD(current_entry
, HDR_USED
, TagListEntry
);
809 current_entry
= current_entry
->Flink
;
812 CurrentTag
= current
->Tag
;
816 if (current
->Tag
== CurrentTag
)
818 RemoveEntryList(¤t
->TagListEntry
);
819 InsertHeadList(&tmpListHead
, ¤t
->TagListEntry
);
820 if (!NewOnly
|| !current
->Dumped
)
824 CurrentSize
+= current
->hdr
.Size
;
825 TotalSize
+= current
->hdr
.Size
;
826 current
->Dumped
= TRUE
;
830 if (CurrentTag
!= 0 && CurrentNrBlocks
!= 0)
832 MiDumpTagStats(CurrentTag
, CurrentNrBlocks
, CurrentSize
);
835 if (!IsListEmpty(&tmpListHead
))
837 tag_hash_table
[i
].Flink
= tmpListHead
.Flink
;
838 tag_hash_table
[i
].Flink
->Blink
= &tag_hash_table
[i
];
839 tag_hash_table
[i
].Blink
= tmpListHead
.Blink
;
840 tag_hash_table
[i
].Blink
->Flink
= &tag_hash_table
[i
];
843 if (TotalBlocks
!= 0)
845 DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
846 TotalBlocks
, TotalSize
, TotalSize
/ TotalBlocks
);
850 DbgPrint("TotalBlocks %d TotalSize %d\n",
851 TotalBlocks
, TotalSize
);
853 Size
= EiFreeNonPagedPool
- (MiNonPagedPoolLength
- MiNonPagedPoolNrOfPages
* PAGE_SIZE
);
854 DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
855 EiNrFreeBlocks
, Size
, EiNrFreeBlocks
? Size
/ EiNrFreeBlocks
: 0);
856 DbgPrint("***************** Dump Complete ***************\n");
857 #endif /* defined(TAG_STATISTICS_TRACKING) */
862 MiDebugDumpNonPagedPool(BOOLEAN NewOnly
)
864 #if defined(POOL_DEBUG_APIS)
866 PLIST_ENTRY current_entry
;
869 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
871 DbgPrint("******* Dumping non paging pool contents ******\n");
872 current_entry
= UsedBlockListHead
.Flink
;
873 while (current_entry
!= &UsedBlockListHead
)
875 current
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
876 if (!NewOnly
|| !current
->Dumped
)
880 c1
= (CHAR
)((current
->Tag
>> 24) & 0xFF);
881 c2
= (CHAR
)((current
->Tag
>> 16) & 0xFF);
882 c3
= (CHAR
)((current
->Tag
>> 8) & 0xFF);
883 c4
= (CHAR
)(current
->Tag
& 0xFF);
885 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
887 DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
888 current
->hdr
.Size
, current
->Tag
, c4
, c3
, c2
, c1
,
893 DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
894 current
->hdr
.Size
, current
->Tag
, current
->Caller
);
896 current
->Dumped
= TRUE
;
898 current_entry
= current_entry
->Flink
;
900 DbgPrint("***************** Dump Complete ***************\n");
901 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
905 #ifdef ENABLE_VALIDATE_POOL
906 static void validate_free_list(void)
908 * FUNCTION: Validate the integrity of the list of free blocks
912 unsigned int blocks_seen
=0;
915 p
= avl_get_first(FreeBlockListRoot
);
921 current
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
922 base_addr
= (PVOID
)current
;
924 if (current
->hdr
.Magic
!= BLOCK_HDR_FREE_MAGIC
)
926 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
928 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
931 if (base_addr
< MiNonPagedPoolStart
||
932 (ULONG_PTR
)base_addr
+ current
->hdr
.Size
> (ULONG_PTR
)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
934 DbgPrint("Block %x found outside pool area\n",current
);
935 DbgPrint("Size %d\n",current
->hdr
.Size
);
936 DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart
,
937 (ULONG_PTR
)MiNonPagedPoolStart
+MiNonPagedPoolLength
);
938 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
941 if (blocks_seen
> EiNrFreeBlocks
)
943 DbgPrint("Too many blocks on free list\n");
944 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
946 p
= avl_get_next(FreeBlockListRoot
, p
);
950 static void validate_used_list(void)
952 * FUNCTION: Validate the integrity of the list of used blocks
956 PLIST_ENTRY current_entry
;
957 unsigned int blocks_seen
=0;
959 current_entry
= UsedBlockListHead
.Flink
;
960 while (current_entry
!= &UsedBlockListHead
)
964 current
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
965 base_addr
= (PVOID
)current
;
967 if (current
->hdr
.Magic
!= BLOCK_HDR_USED_MAGIC
)
969 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
971 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
973 if (base_addr
< MiNonPagedPoolStart
||
974 ((ULONG_PTR
)base_addr
+current
->hdr
.Size
) >
975 (ULONG_PTR
)MiNonPagedPoolStart
+MiNonPagedPoolLength
)
977 DbgPrint("Block %x found outside pool area\n",current
);
978 DbgPrint("Size %d\n",current
->hdr
.Size
);
979 DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart
,
980 (ULONG_PTR
)MiNonPagedPoolStart
+MiNonPagedPoolLength
);
981 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
984 if (blocks_seen
> EiNrUsedBlocks
)
986 DbgPrint("Too many blocks on used list\n");
987 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
989 if (current
->ListEntry
.Flink
!= &UsedBlockListHead
&&
990 current
->ListEntry
.Flink
->Blink
!= ¤t
->ListEntry
)
992 DbgPrint("%s:%d:Break in list (current %x next %x "
993 "current->next->previous %x)\n",
994 __FILE__
,__LINE__
,current
, current
->ListEntry
.Flink
,
995 current
->ListEntry
.Flink
->Blink
);
996 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
999 current_entry
= current_entry
->Flink
;
1003 static void check_duplicates(HDR
* blk
)
1005 * FUNCTION: Check a block has no duplicates
1007 * blk = block to check
1008 * NOTE: Bug checks if duplicates are found
1011 ULONG_PTR base
= (ULONG_PTR
)blk
;
1012 ULONG_PTR last
= (ULONG_PTR
)blk
+ blk
->Size
;
1013 PLIST_ENTRY current_entry
;
1018 p
= avl_get_first(FreeBlockListRoot
);
1022 free
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
1023 if (free
->hdr
.Magic
!= BLOCK_HDR_FREE_MAGIC
)
1025 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
1027 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1030 if ( (ULONG_PTR
)free
> base
&& (ULONG_PTR
)free
< last
)
1032 DbgPrint("intersecting blocks on list\n");
1033 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1035 if ( (ULONG_PTR
)free
< base
&&
1036 ((ULONG_PTR
)free
+ free
->hdr
.Size
) > base
)
1038 DbgPrint("intersecting blocks on list\n");
1039 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1041 p
= avl_get_next(FreeBlockListRoot
, p
);
1044 current_entry
= UsedBlockListHead
.Flink
;
1045 while (current_entry
!= &UsedBlockListHead
)
1047 used
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
1049 if ( (ULONG_PTR
)used
> base
&& (ULONG_PTR
)used
< last
)
1051 DbgPrint("intersecting blocks on list\n");
1052 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1054 if ( (ULONG_PTR
)used
< base
&&
1055 ((ULONG_PTR
)used
+ used
->hdr
.Size
) > base
)
1057 DbgPrint("intersecting blocks on list\n");
1058 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1061 current_entry
= current_entry
->Flink
;
1066 static void validate_kernel_pool(void)
1068 * FUNCTION: Checks the integrity of the kernel memory heap
1073 PLIST_ENTRY current_entry
;
1076 validate_free_list();
1077 validate_used_list();
1079 p
= avl_get_first(FreeBlockListRoot
);
1082 free
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
1083 check_duplicates(&free
->hdr
);
1084 p
= avl_get_next(FreeBlockListRoot
, p
);
1086 current_entry
= UsedBlockListHead
.Flink
;
1087 while (current_entry
!= &UsedBlockListHead
)
1089 used
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
1090 check_duplicates(&used
->hdr
);
1091 current_entry
= current_entry
->Flink
;
1098 free_pages(HDR_FREE
* blk
)
1103 start
= (ULONG_PTR
)blk
;
1104 end
= (ULONG_PTR
)blk
+ blk
->hdr
.Size
;
1107 * If the block doesn't contain a whole page then there is nothing to do
1109 if (PAGE_ROUND_UP(start
) >= PAGE_ROUND_DOWN(end
))
1116 static void remove_from_used_list(HDR_USED
* current
)
1118 RemoveEntryList(¤t
->ListEntry
);
1119 EiUsedNonPagedPool
-= current
->hdr
.Size
;
1123 static void remove_from_free_list(HDR_FREE
* current
)
1125 DPRINT("remove_from_free_list %d\n", current
->hdr
.Size
);
1127 avl_remove(&FreeBlockListRoot
, ¤t
->Node
, compare_node
);
1129 EiFreeNonPagedPool
-= current
->hdr
.Size
;
1131 DPRINT("remove_from_free_list done\n");
1134 DumpFreeBlockTree();
1139 add_to_free_list(HDR_FREE
* blk
)
1141 * FUNCTION: add the block to the free list (internal)
1145 BOOL UpdatePrevPtr
= FALSE
;
1147 DPRINT("add_to_free_list (%d)\n", blk
->hdr
.Size
);
1151 current
= (HDR_FREE
*)blk
->hdr
.previous
;
1152 if (current
&& current
->hdr
.Magic
== BLOCK_HDR_FREE_MAGIC
)
1154 remove_from_free_list(current
);
1155 current
->hdr
.Size
= current
->hdr
.Size
+ blk
->hdr
.Size
;
1156 current
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1157 memset(blk
, 0xcc, HDR_USED_SIZE
);
1159 UpdatePrevPtr
= TRUE
;
1162 current
= (HDR_FREE
*)((ULONG_PTR
)blk
+ blk
->hdr
.Size
);
1163 if ((char*)current
< (char*)MiNonPagedPoolStart
+ MiNonPagedPoolLength
&&
1164 current
->hdr
.Magic
== BLOCK_HDR_FREE_MAGIC
)
1166 remove_from_free_list(current
);
1167 blk
->hdr
.Size
+= current
->hdr
.Size
;
1168 memset(current
, 0xcc, HDR_FREE_SIZE
);
1169 UpdatePrevPtr
= TRUE
;
1170 current
= (HDR_FREE
*)((ULONG_PTR
)blk
+ blk
->hdr
.Size
);
1172 if (UpdatePrevPtr
&&
1173 (char*)current
< (char*)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1175 current
->hdr
.previous
= &blk
->hdr
;
1177 DPRINT("%d\n", blk
->hdr
.Size
);
1178 blk
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1179 EiFreeNonPagedPool
+= blk
->hdr
.Size
;
1180 avl_insert(&FreeBlockListRoot
, &blk
->Node
, compare_node
);
1181 DPRINT("add_to_free_list done\n");
1184 DumpFreeBlockTree();
1188 static void add_to_used_list(HDR_USED
* blk
)
1190 * FUNCTION: add the block to the used list (internal)
1193 InsertHeadList(&UsedBlockListHead
, &blk
->ListEntry
);
1194 EiUsedNonPagedPool
+= blk
->hdr
.Size
;
1200 grow_block(HDR_FREE
* blk
, PVOID end
)
1204 ULONG_PTR StartIndex
, EndIndex
;
1207 StartIndex
= (ULONG_PTR
)(PAGE_ROUND_UP((ULONG_PTR
)blk
+ HDR_FREE_SIZE
- (ULONG_PTR
)MiNonPagedPoolStart
)) / PAGE_SIZE
;
1208 EndIndex
= ((ULONG_PTR
)PAGE_ROUND_UP(end
) - (ULONG_PTR
)MiNonPagedPoolStart
) / PAGE_SIZE
;
1211 for (i
= StartIndex
; i
< EndIndex
; i
++)
1213 if (!(MiNonPagedPoolAllocMap
[i
/ 32] & (1 << (i
% 32))))
1215 for (j
= i
+ 1; j
< EndIndex
&& j
- i
< 32; j
++)
1217 if (MiNonPagedPoolAllocMap
[j
/ 32] & (1 << (j
% 32)))
1222 for (k
= 0; k
< j
- i
; k
++)
1224 Status
= MmRequestPageMemoryConsumer(MC_NPPOOL
, FALSE
, &Page
[k
]);
1225 if (!NT_SUCCESS(Status
))
1227 for (i
= 0; i
< k
; i
++)
1229 MmReleasePageMemoryConsumer(MC_NPPOOL
, Page
[i
]);
1234 Status
= MmCreateVirtualMapping(NULL
,
1235 (PVOID
)((ULONG_PTR
)MiNonPagedPoolStart
+ i
* PAGE_SIZE
),
1236 PAGE_READWRITE
|PAGE_SYSTEM
,
1239 if (!NT_SUCCESS(Status
))
1241 for (i
= 0; i
< k
; i
++)
1243 MmReleasePageMemoryConsumer(MC_NPPOOL
, Page
[i
]);
1247 for (j
= i
; j
< k
+ i
; j
++)
1249 MiNonPagedPoolAllocMap
[j
/ 32] |= (1 << (j
% 32));
1251 MiNonPagedPoolNrOfPages
+= k
;
1258 static HDR_USED
* get_block(unsigned int size
, unsigned long alignment
)
1260 HDR_FREE
*blk
, *current
, *previous
= NULL
, *next
= NULL
, *best
= NULL
;
1261 ULONG previous_size
= 0, current_size
, next_size
= 0, new_size
;
1263 PVOID addr
, aligned_addr
, best_aligned_addr
=NULL
;
1266 DPRINT("get_block %d\n", size
);
1268 p
= avl_find_equal_or_greater(FreeBlockListRoot
, size
+ HDR_USED_SIZE
, compare_value
);
1271 current
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
1272 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1273 /* calculate first aligned address available within this block */
1274 aligned_addr
= alignment
> 0 ? MM_ROUND_UP(addr
, alignment
) : addr
;
1275 if (size
< PAGE_SIZE
)
1277 /* check that the block is in one page */
1278 if (PAGE_ROUND_DOWN(aligned_addr
) != PAGE_ROUND_DOWN((ULONG_PTR
)aligned_addr
+ size
- 1))
1280 aligned_addr
= (PVOID
)PAGE_ROUND_UP(aligned_addr
);
1283 DPRINT("%x %x\n", addr
, aligned_addr
);
1284 if (aligned_addr
!= addr
)
1286 while((ULONG_PTR
)aligned_addr
- (ULONG_PTR
)addr
< HDR_FREE_SIZE
)
1290 aligned_addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
+ HDR_FREE_SIZE
);
1294 aligned_addr
= MM_ROUND_UP((PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
+ HDR_FREE_SIZE
), alignment
);
1296 if (size
< PAGE_SIZE
)
1298 /* check that the block is in one page */
1299 if (PAGE_ROUND_DOWN(aligned_addr
) != PAGE_ROUND_DOWN((ULONG_PTR
)aligned_addr
+ size
- 1))
1301 aligned_addr
= (PVOID
)PAGE_ROUND_UP(aligned_addr
);
1306 DPRINT("%x %x\n", addr
, aligned_addr
);
1307 new_size
= (ULONG_PTR
)aligned_addr
- (ULONG_PTR
)addr
+ size
;
1308 if (current
->hdr
.Size
>= new_size
+ HDR_USED_SIZE
&&
1309 (best
== NULL
|| current
->hdr
.Size
< best
->hdr
.Size
))
1312 best_aligned_addr
= aligned_addr
;
1313 if (new_size
<= size
+ 2 * HDR_FREE_SIZE
)
1321 if (size
< PAGE_SIZE
)
1323 if (current
->hdr
.Size
>= 2 * PAGE_SIZE
+ HDR_FREE_SIZE
)
1330 if (current
->hdr
.Size
>= size
+ alignment
+ HDR_FREE_SIZE
)
1336 p
= avl_get_next(FreeBlockListRoot
, p
);
1339 * We didn't find anything suitable at all.
1346 DPRINT(":: blk %x blk->hdr.Size %d (%d) Size %d\n", best
, best
->hdr
.Size
, best
->hdr
.Size
- HDR_USED_SIZE
, size
);
1349 current_size
= current
->hdr
.Size
- HDR_USED_SIZE
;
1350 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1351 if (addr
!= best_aligned_addr
)
1353 blk
= (HDR_FREE
*)((ULONG_PTR
)best_aligned_addr
- HDR_USED_SIZE
);
1355 * if size-aligned, break off the preceding bytes into their own block...
1358 previous_size
= (ULONG_PTR
)blk
- (ULONG_PTR
)previous
- HDR_FREE_SIZE
;
1360 current_size
-= ((ULONG_PTR
)current
- (ULONG_PTR
)previous
);
1362 end
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
+ size
);
1364 if (current_size
>= size
+ HDR_FREE_SIZE
+ MM_POOL_ALIGNMENT
)
1366 /* create a new free block after our block, if the memory size is >= 4 byte for this block */
1367 next
= (HDR_FREE
*)((ULONG_PTR
)current
+ size
+ HDR_USED_SIZE
);
1368 next_size
= current_size
- size
- HDR_FREE_SIZE
;
1369 current_size
= size
;
1370 end
= (PVOID
)((ULONG_PTR
)next
+ HDR_FREE_SIZE
);
1375 remove_from_free_list(previous
);
1376 if (!grow_block(previous
, end
))
1378 add_to_free_list(previous
);
1381 memset(current
, 0, HDR_USED_SIZE
);
1382 current
->hdr
.Size
= current_size
+ HDR_USED_SIZE
;
1383 current
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1384 current
->hdr
.previous
= &previous
->hdr
;
1385 previous
->hdr
.Size
= previous_size
+ HDR_FREE_SIZE
;
1388 blk
= (HDR_FREE
*)((ULONG_PTR
)current
+ current
->hdr
.Size
);
1389 if ((ULONG_PTR
)blk
< (ULONG_PTR
)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1391 blk
->hdr
.previous
= ¤t
->hdr
;
1395 add_to_free_list(previous
);
1399 remove_from_free_list(current
);
1401 if (!grow_block(current
, end
))
1403 add_to_free_list(current
);
1406 current
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1409 current
->hdr
.Size
= current_size
+ HDR_USED_SIZE
;
1415 memset(next
, 0, HDR_FREE_SIZE
);
1416 next
->hdr
.Size
= next_size
+ HDR_FREE_SIZE
;
1417 next
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1418 next
->hdr
.previous
= ¤t
->hdr
;
1419 blk
= (HDR_FREE
*)((ULONG_PTR
)next
+ next
->hdr
.Size
);
1420 if ((ULONG_PTR
)blk
< (ULONG_PTR
)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1422 blk
->hdr
.previous
= &next
->hdr
;
1424 add_to_free_list(next
);
1427 add_to_used_list((HDR_USED
*)current
);
1430 if (size
< PAGE_SIZE
)
1432 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1433 if (PAGE_ROUND_DOWN(addr
) != PAGE_ROUND_DOWN((PVOID
)((ULONG_PTR
)addr
+ size
- 1)))
1435 DPRINT1("%x %x\n", addr
, (ULONG_PTR
)addr
+ size
);
1437 ASSERT (PAGE_ROUND_DOWN(addr
) == PAGE_ROUND_DOWN((PVOID
)((ULONG_PTR
)addr
+ size
- 1)));
1441 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1442 ASSERT(MM_ROUND_UP(addr
, alignment
) == addr
);
1444 return (HDR_USED
*)current
;
1448 ExRosQueryNonPagedPoolTag ( PVOID Addr
)
1450 HDR_USED
* blk
=(HDR_USED
*)((ULONG_PTR
)Addr
- HDR_USED_SIZE
);
1451 if (blk
->hdr
.Magic
!= BLOCK_HDR_USED_MAGIC
)
1457 VOID STDCALL
ExFreeNonPagedPool (PVOID block
)
1459 * FUNCTION: Releases previously allocated memory
1461 * block = block to free
1464 HDR_USED
* blk
=(HDR_USED
*)((ULONG_PTR
)block
- HDR_USED_SIZE
);
1472 DPRINT("freeing block %x\n",blk
);
1474 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block
,blk
->hdr
.Size
,
1476 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
1479 if (blk
->hdr
.Magic
!= BLOCK_HDR_USED_MAGIC
)
1481 if (blk
->hdr
.Magic
== BLOCK_HDR_FREE_MAGIC
)
1483 DbgPrint("ExFreePool of already freed address %x\n", block
);
1487 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1488 block
, blk
->hdr
.Magic
);
1493 memset(block
, 0xcc, blk
->hdr
.Size
- HDR_USED_SIZE
);
1494 #ifdef TAG_STATISTICS_TRACKING
1496 MiRemoveFromTagHashTable(blk
);
1499 remove_from_used_list(blk
);
1500 blk
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1501 add_to_free_list((HDR_FREE
*)blk
);
1503 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1507 ExAllocateNonPagedPoolWithTag(POOL_TYPE Type
, ULONG Size
, ULONG Tag
, PVOID Caller
)
1510 HDR_USED
* best
= NULL
;
1514 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1517 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
1522 /* after some allocations print the npaged pool stats */
1523 #ifdef TAG_STATISTICS_TRACKING
1526 static ULONG counter
= 0;
1527 if (counter
++ % 100000 == 0)
1529 MiDebugDumpNonPagedPoolStats(FALSE
);
1535 * accommodate this useful idiom
1539 POOL_TRACE("= NULL\n");
1540 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1543 /* Make the size dword aligned, this makes the block dword aligned */
1544 Size
= ROUND_UP(Size
, MM_POOL_ALIGNMENT
);
1546 if (Size
>= PAGE_SIZE
)
1548 alignment
= PAGE_SIZE
;
1550 else if (Type
& CACHE_ALIGNED_POOL_MASK
)
1552 alignment
= MM_CACHE_LINE_SIZE
;
1559 best
= get_block(Size
, alignment
);
1562 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1563 DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
1568 best
->Caller
= Caller
;
1569 best
->Dumped
= FALSE
;
1570 best
->TagListEntry
.Flink
= best
->TagListEntry
.Blink
= NULL
;
1571 #ifdef TAG_STATISTICS_TRACKING
1573 MiAddToTagHashTable(best
);
1576 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1577 block
= (PVOID
)((ULONG_PTR
)best
+ HDR_USED_SIZE
);
1578 /* RtlZeroMemory(block, Size);*/
1585 MiInitializeNonPagedPool(VOID
)
1593 #ifdef TAG_STATISTICS_TRACKING
1595 for (i
= 0; i
< TAG_HASH_TABLE_SIZE
; i
++)
1597 InitializeListHead(&tag_hash_table
[i
]);
1600 KeInitializeSpinLock(&MmNpoolLock
);
1601 InitializeListHead(&UsedBlockListHead
);
1602 InitializeListHead(&AddressListHead
);
1603 FreeBlockListRoot
= NULL
;
1605 MiNonPagedPoolAllocMap
= (PVOID
)((ULONG_PTR
)MiNonPagedPoolStart
+ PAGE_SIZE
);
1606 MiNonPagedPoolNrOfPages
= PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
, 32) / 8 + HDR_FREE_SIZE
) + PAGE_SIZE
;
1607 MiNonPagedPoolNrOfPages
/= PAGE_SIZE
;
1608 Address
= MiNonPagedPoolStart
;
1610 for (i
= 0; i
< MiNonPagedPoolNrOfPages
; i
++)
1612 Status
= MmRequestPageMemoryConsumer(MC_NPPOOL
, FALSE
, &Page
);
1613 if (!NT_SUCCESS(Status
))
1615 DbgPrint("Unable to allocate a page\n");
1619 Status
= MmCreateVirtualMapping(NULL
,
1621 PAGE_READWRITE
|PAGE_SYSTEM
,
1624 if (!NT_SUCCESS(Status
))
1626 DbgPrint("Unable to create virtual mapping\n");
1629 Address
= (PVOID
)((ULONG_PTR
)Address
+ PAGE_SIZE
);
1632 for (i
= 0; i
< MiNonPagedPoolNrOfPages
; i
++)
1634 MiNonPagedPoolAllocMap
[i
/ 32] |= (1 << (i
% 32));
1637 /* the first block is free */
1638 free
= (HDR_FREE
*)MiNonPagedPoolStart
;
1639 free
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1640 free
->hdr
.Size
= PAGE_SIZE
- HDR_USED_SIZE
;
1641 free
->hdr
.previous
= NULL
;
1642 memset((PVOID
)((ULONG_PTR
)free
+ HDR_FREE_SIZE
), 0xcc, free
->hdr
.Size
- HDR_FREE_SIZE
);
1643 add_to_free_list(free
);
1645 /* the second block contains the non paged pool bitmap */
1646 used
= (HDR_USED
*)((ULONG_PTR
)free
+ free
->hdr
.Size
);
1647 used
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1648 used
->hdr
.Size
= ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
, 32) / 8 + HDR_USED_SIZE
;
1649 used
->hdr
.previous
= &free
->hdr
;
1650 used
->Tag
= 0xffffffff;
1652 used
->Dumped
= FALSE
;
1653 add_to_used_list(used
);
1654 #ifdef TAG_STATISTICS_TRACKING
1655 MiAddToTagHashTable(used
);
1658 /* the third block is the free block after the bitmap */
1659 free
= (HDR_FREE
*)((ULONG_PTR
)used
+ used
->hdr
.Size
);
1660 free
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1661 free
->hdr
.Size
= MiNonPagedPoolLength
- ((ULONG_PTR
)free
- (ULONG_PTR
)MiNonPagedPoolStart
);
1662 free
->hdr
.previous
= &used
->hdr
;
1663 memset((PVOID
)((ULONG_PTR
)free
+ HDR_FREE_SIZE
), 0x0cc, (ULONG_PTR
)Address
- (ULONG_PTR
)free
- HDR_FREE_SIZE
);
1664 add_to_free_list(free
);
1669 MiAllocateSpecialPool (IN POOL_TYPE PoolType
,
1670 IN SIZE_T NumberOfBytes
,
1675 /* FIXME: Special Pools not Supported */
1676 DbgPrint("Special Pools not supported\n");