3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 * Iwan Fatahi (i_fatahi@hotmail.com)
10 * Robert Bergkvist (fragdance@hotmail.com)
14 /* INCLUDES ****************************************************************/
18 #include <internal/debug.h>
20 #if defined (ALLOC_PRAGMA)
21 #pragma alloc_text(INIT, MiInitializeNonPagedPool)
24 #ifdef ENABLE_VALIDATE_POOL
25 #define VALIDATE_POOL validate_kernel_pool()
31 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
34 #define POOL_TRACE(args...)
40 /* avl types ****************************************************************/
43 * This declarations should be moved into a separate header file.
48 struct _NODE
* link
[2];
54 /* TYPES *******************************************************************/
56 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
57 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
60 * fields present at the start of a block (this is for internal use only)
66 struct _HDR
* previous
;
69 typedef struct _HDR_USED
75 LIST_ENTRY TagListEntry
;
77 } HDR_USED
, *PHDR_USED
;
79 typedef struct _HDR_FREE
83 } HDR_FREE
, *PHDR_FREE
;
85 #define HDR_FREE_SIZE ROUND_UP(sizeof(HDR_FREE), MM_POOL_ALIGNMENT)
86 #define HDR_USED_SIZE ROUND_UP(sizeof(HDR_USED), MM_POOL_ALIGNMENT)
89 /* GLOBALS *****************************************************************/
91 extern PVOID MiNonPagedPoolStart
;
92 extern ULONG MiNonPagedPoolLength
;
95 * Head of the list of free blocks
97 static PNODE FreeBlockListRoot
= NULL
;
100 * Head of the list of in use block
102 static LIST_ENTRY UsedBlockListHead
;
104 static LIST_ENTRY AddressListHead
;
107 * Count of free blocks
109 static ULONG EiNrFreeBlocks
= 0;
112 * Count of used blocks
114 static ULONG EiNrUsedBlocks
= 0;
117 * Lock that protects the non-paged pool data structures
119 static KSPIN_LOCK MmNpoolLock
;
122 * Total memory used for free nonpaged pool blocks
124 ULONG EiFreeNonPagedPool
= 0;
127 * Total memory used for nonpaged pool blocks
129 ULONG EiUsedNonPagedPool
= 0;
131 /* Total quota for Non Paged Pool */
132 ULONG MmTotalNonPagedPoolQuota
= 0;
135 * Allocate a range of memory in the nonpaged pool
138 MiAllocNonPagedPoolRegion(unsigned int nr_pages
);
141 MiFreeNonPagedPoolRegion(PVOID Addr
, ULONG Count
, BOOLEAN Free
);
143 #ifdef TAG_STATISTICS_TRACKING
144 #define TAG_HASH_TABLE_SIZE (1024)
145 static LIST_ENTRY tag_hash_table
[TAG_HASH_TABLE_SIZE
];
146 #endif /* TAG_STATISTICS_TRACKING */
148 static PULONG MiNonPagedPoolAllocMap
;
149 static ULONG MiNonPagedPoolNrOfPages
;
151 /* avl helper functions ****************************************************/
153 void DumpFreeBlockNode(PNODE p
)
155 static int count
= 0;
162 DumpFreeBlockNode(p
->link
[0]);
163 blk
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
164 DbgPrint("%08x %8d (%d)\n", blk
, blk
->hdr
.Size
, count
);
165 DumpFreeBlockNode(p
->link
[1]);
/*
 * Debug helper: dump the AVL tree of free nonpaged-pool blocks to the
 * kernel debugger, bracketed by begin/end marker lines.
 * NOTE(review): this extraction is lossy -- braces (and any #ifdef guard)
 * are not visible; code text below is kept byte-identical.
 */
170 void DumpFreeBlockTree(void)
172 DbgPrint("--- Begin tree ------------------\n");
/* Print the header that would contain the root node; FreeBlockListRoot may
   be NULL, in which case this prints a meaningless offset value. */
173 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot
, HDR_FREE
, Node
));
/* In-order recursive dump of every free block: address, size, counter. */
174 DumpFreeBlockNode(FreeBlockListRoot
);
175 DbgPrint("--- End tree --------------------\n");
/*
 * AVL ordering callback for the free-block tree: compares two tree nodes
 * by the size (hdr.Size) of their containing HDR_FREE blocks.
 * NOTE(review): this extraction is lossy -- the bodies of the three
 * comparisons (orig. lines 184-195, 197-199, 201-207: braces and return
 * statements) are not visible. Presumably equal sizes tie-break (e.g. on
 * address) and smaller/greater map to negative/positive results -- confirm
 * against the upstream file. Code text below is kept byte-identical.
 */
178 int compare_node(PNODE p1
, PNODE p2
)
/* Recover the enclosing free-block headers from the embedded AVL nodes. */
180 HDR_FREE
* blk1
= CONTAINING_RECORD(p1
, HDR_FREE
, Node
);
181 HDR_FREE
* blk2
= CONTAINING_RECORD(p2
, HDR_FREE
, Node
);
183 if (blk1
->hdr
.Size
== blk2
->hdr
.Size
)
196 if (blk1
->hdr
.Size
< blk2
->hdr
.Size
)
200 if (blk1
->hdr
.Size
> blk2
->hdr
.Size
)
/*
 * AVL lookup callback: compare a requested size (value points to a ULONG)
 * against a free block's hdr.Size; used by the find-equal-or-greater
 * search when allocating.
 * NOTE(review): the return statements (orig. lines 215-217, 219-224) are
 * missing from this lossy extraction -- presumably negative / positive /
 * zero by the usual comparator convention; confirm upstream. Code text
 * below is kept byte-identical.
 */
209 int compare_value(PVOID value
, PNODE p
)
211 HDR_FREE
* blk
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
/* Dereference the caller-supplied size to compare against. */
212 ULONG v
= *(PULONG
)value
;
214 if (v
< blk
->hdr
.Size
)
218 if (v
> blk
->hdr
.Size
)
225 /* avl functions **********************************************************/
228 * The avl functions should be moved into a separate file.
231 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
233 void avl_insert (PNODE
* root
, PNODE n
, int (*compare
)(PNODE
, PNODE
))
235 PNODE y
; /* Top node to update balance factor, and parent. */
236 PNODE p
, q
; /* Iterator, and parent. */
237 PNODE w
; /* New root of rebalanced subtree. */
238 int dir
= 0; /* Direction to descend. */
240 n
->link
[0] = n
->link
[1] = n
->parent
= NULL
;
244 for (q
= NULL
, p
= *root
; p
!= NULL
; q
= p
, p
= p
->link
[dir
])
246 dir
= compare(n
, p
) > 0;
268 for (p
= n
; p
!= y
; p
= q
)
271 dir
= q
->link
[0] != p
;
282 if (y
->balance
== -2)
284 PNODE x
= y
->link
[0];
285 if (x
->balance
== -1)
288 y
->link
[0] = x
->link
[1];
290 x
->balance
= y
->balance
= 0;
291 x
->parent
= y
->parent
;
293 if (y
->link
[0] != NULL
)
295 y
->link
[0]->parent
= y
;
300 ASSERT(x
->balance
== +1);
302 x
->link
[1] = w
->link
[0];
304 y
->link
[0] = w
->link
[1];
306 if (w
->balance
== -1)
311 else if (w
->balance
== 0)
313 x
->balance
= y
->balance
= 0;
315 else /* |w->pavl_balance == +1| */
321 w
->parent
= y
->parent
;
322 x
->parent
= y
->parent
= w
;
323 if (x
->link
[1] != NULL
)
325 x
->link
[1]->parent
= x
;
327 if (y
->link
[0] != NULL
)
329 y
->link
[0]->parent
= y
;
333 else if (y
->balance
== +2)
335 PNODE x
= y
->link
[1];
336 if (x
->balance
== +1)
339 y
->link
[1] = x
->link
[0];
341 x
->balance
= y
->balance
= 0;
342 x
->parent
= y
->parent
;
344 if (y
->link
[1] != NULL
)
346 y
->link
[1]->parent
= y
;
351 ASSERT(x
->balance
== -1);
353 x
->link
[0] = w
->link
[1];
355 y
->link
[1] = w
->link
[0];
362 else if (w
->balance
== 0)
364 x
->balance
= y
->balance
= 0;
366 else /* |w->pavl_balance == -1| */
372 w
->parent
= y
->parent
;
373 x
->parent
= y
->parent
= w
;
374 if (x
->link
[0] != NULL
)
376 x
->link
[0]->parent
= x
;
378 if (y
->link
[1] != NULL
)
380 y
->link
[1]->parent
= y
;
388 if (w
->parent
!= NULL
)
390 w
->parent
->link
[y
!= w
->parent
->link
[0]] = w
;
400 void avl_remove (PNODE
*root
, PNODE item
, int (*compare
)(PNODE
, PNODE
))
402 PNODE p
; /* Traverses tree to find node to delete. */
403 PNODE q
; /* Parent of |p|. */
404 int dir
; /* Side of |q| on which |p| is linked. */
406 if (root
== NULL
|| *root
== NULL
)
420 dir
= compare(p
, q
) > 0;
423 if (p
->link
[1] == NULL
)
425 q
->link
[dir
] = p
->link
[0];
426 if (q
->link
[dir
] != NULL
)
428 q
->link
[dir
]->parent
= p
->parent
;
433 PNODE r
= p
->link
[1];
434 if (r
->link
[0] == NULL
)
436 r
->link
[0] = p
->link
[0];
438 r
->parent
= p
->parent
;
439 if (r
->link
[0] != NULL
)
441 r
->link
[0]->parent
= r
;
443 r
->balance
= p
->balance
;
449 PNODE s
= r
->link
[0];
450 while (s
->link
[0] != NULL
)
455 r
->link
[0] = s
->link
[1];
456 s
->link
[0] = p
->link
[0];
457 s
->link
[1] = p
->link
[1];
459 if (s
->link
[0] != NULL
)
461 s
->link
[0]->parent
= s
;
463 s
->link
[1]->parent
= s
;
464 s
->parent
= p
->parent
;
465 if (r
->link
[0] != NULL
)
467 r
->link
[0]->parent
= r
;
469 s
->balance
= p
->balance
;
475 item
->link
[0] = item
->link
[1] = item
->parent
= NULL
;
478 while (q
!= (PNODE
) root
)
482 if (y
->parent
!= NULL
)
493 dir
= q
->link
[0] != y
;
495 if (y
->balance
== +1)
499 else if (y
->balance
== +2)
501 PNODE x
= y
->link
[1];
502 if (x
->balance
== -1)
506 ASSERT(x
->balance
== -1);
508 x
->link
[0] = w
->link
[1];
510 y
->link
[1] = w
->link
[0];
512 if (w
->balance
== +1)
517 else if (w
->balance
== 0)
519 x
->balance
= y
->balance
= 0;
521 else /* |w->pavl_balance == -1| */
527 w
->parent
= y
->parent
;
528 x
->parent
= y
->parent
= w
;
529 if (x
->link
[0] != NULL
)
531 x
->link
[0]->parent
= x
;
533 if (y
->link
[1] != NULL
)
535 y
->link
[1]->parent
= y
;
541 y
->link
[1] = x
->link
[0];
543 x
->parent
= y
->parent
;
545 if (y
->link
[1] != NULL
)
547 y
->link
[1]->parent
= y
;
558 x
->balance
= y
->balance
= 0;
566 dir
= q
->link
[0] != y
;
568 if (y
->balance
== -1)
572 else if (y
->balance
== -2)
574 PNODE x
= y
->link
[0];
575 if (x
->balance
== +1)
578 ASSERT(x
->balance
== +1);
580 x
->link
[1] = w
->link
[0];
582 y
->link
[0] = w
->link
[1];
584 if (w
->balance
== -1)
589 else if (w
->balance
== 0)
591 x
->balance
= y
->balance
= 0;
593 else /* |w->pavl_balance == +1| */
599 w
->parent
= y
->parent
;
600 x
->parent
= y
->parent
= w
;
601 if (x
->link
[1] != NULL
)
603 x
->link
[1]->parent
= x
;
605 if (y
->link
[0] != NULL
)
607 y
->link
[0]->parent
= y
;
613 y
->link
[0] = x
->link
[1];
615 x
->parent
= y
->parent
;
617 if (y
->link
[0] != NULL
)
619 y
->link
[0]->parent
= y
;
630 x
->balance
= y
->balance
= 0;
640 PNODE _cdecl
avl_get_first(PNODE root
)
655 PNODE
avl_get_next(PNODE root
, PNODE p
)
670 while (q
&& q
->link
[1] == p
)
686 PNODE
avl_find_equal_or_greater(PNODE root
, ULONG size
, int (compare
)(PVOID
, PNODE
))
692 for (p
= root
; p
!= NULL
;)
694 cmp
= compare((PVOID
)&size
, p
);
708 cmp
= compare((PVOID
)&size
, p
->link
[0]);
721 /* non paged pool functions ************************************************/
723 #ifdef TAG_STATISTICS_TRACKING
/*
 * TAG_STATISTICS_TRACKING helper: unlink a used block from its per-tag
 * hash bucket (membership is via block->TagListEntry).
 * NOTE(review): the return-type line, braces, and any guard lines
 * (orig. 726-733) are missing from this lossy extraction; code text below
 * is kept byte-identical.
 */
725 MiRemoveFromTagHashTable(HDR_USED
* block
)
727 * Remove a block from the tag hash table
734 RemoveEntryList(&block
->TagListEntry
);
/*
 * TAG_STATISTICS_TRACKING helper: insert a used block into the tag hash
 * table, bucketed by (block->Tag % TAG_HASH_TABLE_SIZE).
 * NOTE(review): the return-type line, the declaration of 'hash', braces
 * and intermediate lines (orig. 739-749, 751) are missing from this lossy
 * extraction; code text below is kept byte-identical.
 */
738 MiAddToTagHashTable(HDR_USED
* block
)
740 * Add a block to the tag hash table
750 hash
= block
->Tag
% TAG_HASH_TABLE_SIZE
;
752 InsertHeadList(&tag_hash_table
[hash
], &block
->TagListEntry
);
754 #endif /* TAG_STATISTICS_TRACKING */
756 #if defined(TAG_STATISTICS_TRACKING)
758 MiDumpTagStats(ULONG CurrentTag
, ULONG CurrentNrBlocks
, ULONG CurrentSize
)
762 c1
= (CHAR
)((CurrentTag
>> 24) & 0xFF);
763 c2
= (CHAR
)((CurrentTag
>> 16) & 0xFF);
764 c3
= (CHAR
)((CurrentTag
>> 8) & 0xFF);
765 c4
= (CHAR
)(CurrentTag
& 0xFF);
767 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
769 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
770 CurrentTag
, c4
, c3
, c2
, c1
, CurrentNrBlocks
,
771 CurrentSize
, CurrentSize
/ CurrentNrBlocks
);
775 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
776 CurrentTag
, CurrentNrBlocks
, CurrentSize
,
777 CurrentSize
/ CurrentNrBlocks
);
780 #endif /* defined(TAG_STATISTICS_TRACKING) */
784 MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly
)
786 #if defined(TAG_STATISTICS_TRACKING)
790 ULONG CurrentNrBlocks
= 0;
791 ULONG CurrentSize
= 0;
795 LIST_ENTRY tmpListHead
;
796 PLIST_ENTRY current_entry
;
798 DbgPrint("******* Dumping non paging pool stats ******\n");
801 for (i
= 0; i
< TAG_HASH_TABLE_SIZE
; i
++)
803 InitializeListHead(&tmpListHead
);
805 while (!IsListEmpty(&tag_hash_table
[i
]))
809 current_entry
= tag_hash_table
[i
].Flink
;
810 while (current_entry
!= &tag_hash_table
[i
])
812 current
= CONTAINING_RECORD(current_entry
, HDR_USED
, TagListEntry
);
813 current_entry
= current_entry
->Flink
;
816 CurrentTag
= current
->Tag
;
820 if (current
->Tag
== CurrentTag
)
822 RemoveEntryList(¤t
->TagListEntry
);
823 InsertHeadList(&tmpListHead
, ¤t
->TagListEntry
);
824 if (!NewOnly
|| !current
->Dumped
)
828 CurrentSize
+= current
->hdr
.Size
;
829 TotalSize
+= current
->hdr
.Size
;
830 current
->Dumped
= TRUE
;
834 if (CurrentTag
!= 0 && CurrentNrBlocks
!= 0)
836 MiDumpTagStats(CurrentTag
, CurrentNrBlocks
, CurrentSize
);
839 if (!IsListEmpty(&tmpListHead
))
841 tag_hash_table
[i
].Flink
= tmpListHead
.Flink
;
842 tag_hash_table
[i
].Flink
->Blink
= &tag_hash_table
[i
];
843 tag_hash_table
[i
].Blink
= tmpListHead
.Blink
;
844 tag_hash_table
[i
].Blink
->Flink
= &tag_hash_table
[i
];
847 if (TotalBlocks
!= 0)
849 DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
850 TotalBlocks
, TotalSize
, TotalSize
/ TotalBlocks
);
854 DbgPrint("TotalBlocks %d TotalSize %d\n",
855 TotalBlocks
, TotalSize
);
857 Size
= EiFreeNonPagedPool
- (MiNonPagedPoolLength
- MiNonPagedPoolNrOfPages
* PAGE_SIZE
);
858 DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
859 EiNrFreeBlocks
, Size
, EiNrFreeBlocks
? Size
/ EiNrFreeBlocks
: 0);
860 DbgPrint("***************** Dump Complete ***************\n");
861 #endif /* defined(TAG_STATISTICS_TRACKING) */
866 MiDebugDumpNonPagedPool(BOOLEAN NewOnly
)
868 #if defined(POOL_DEBUG_APIS)
870 PLIST_ENTRY current_entry
;
873 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
875 DbgPrint("******* Dumping non paging pool contents ******\n");
876 current_entry
= UsedBlockListHead
.Flink
;
877 while (current_entry
!= &UsedBlockListHead
)
879 current
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
880 if (!NewOnly
|| !current
->Dumped
)
884 c1
= (CHAR
)((current
->Tag
>> 24) & 0xFF);
885 c2
= (CHAR
)((current
->Tag
>> 16) & 0xFF);
886 c3
= (CHAR
)((current
->Tag
>> 8) & 0xFF);
887 c4
= (CHAR
)(current
->Tag
& 0xFF);
889 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
891 DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
892 current
->hdr
.Size
, current
->Tag
, c4
, c3
, c2
, c1
,
897 DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
898 current
->hdr
.Size
, current
->Tag
, current
->Caller
);
900 current
->Dumped
= TRUE
;
902 current_entry
= current_entry
->Flink
;
904 DbgPrint("***************** Dump Complete ***************\n");
905 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
909 #ifdef ENABLE_VALIDATE_POOL
910 static void validate_free_list(void)
912 * FUNCTION: Validate the integrity of the list of free blocks
916 unsigned int blocks_seen
=0;
919 p
= avl_get_first(FreeBlockListRoot
);
925 current
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
926 base_addr
= (PVOID
)current
;
928 if (current
->hdr
.Magic
!= BLOCK_HDR_FREE_MAGIC
)
930 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
932 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
935 if (base_addr
< MiNonPagedPoolStart
||
936 (ULONG_PTR
)base_addr
+ current
->hdr
.Size
> (ULONG_PTR
)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
938 DbgPrint("Block %x found outside pool area\n",current
);
939 DbgPrint("Size %d\n",current
->hdr
.Size
);
940 DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart
,
941 (ULONG_PTR
)MiNonPagedPoolStart
+MiNonPagedPoolLength
);
942 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
945 if (blocks_seen
> EiNrFreeBlocks
)
947 DbgPrint("Too many blocks on free list\n");
948 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
950 p
= avl_get_next(FreeBlockListRoot
, p
);
954 static void validate_used_list(void)
956 * FUNCTION: Validate the integrity of the list of used blocks
960 PLIST_ENTRY current_entry
;
961 unsigned int blocks_seen
=0;
963 current_entry
= UsedBlockListHead
.Flink
;
964 while (current_entry
!= &UsedBlockListHead
)
968 current
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
969 base_addr
= (PVOID
)current
;
971 if (current
->hdr
.Magic
!= BLOCK_HDR_USED_MAGIC
)
973 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
975 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
977 if (base_addr
< MiNonPagedPoolStart
||
978 ((ULONG_PTR
)base_addr
+current
->hdr
.Size
) >
979 (ULONG_PTR
)MiNonPagedPoolStart
+MiNonPagedPoolLength
)
981 DbgPrint("Block %x found outside pool area\n",current
);
982 DbgPrint("Size %d\n",current
->hdr
.Size
);
983 DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart
,
984 (ULONG_PTR
)MiNonPagedPoolStart
+MiNonPagedPoolLength
);
985 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
988 if (blocks_seen
> EiNrUsedBlocks
)
990 DbgPrint("Too many blocks on used list\n");
991 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
993 if (current
->ListEntry
.Flink
!= &UsedBlockListHead
&&
994 current
->ListEntry
.Flink
->Blink
!= ¤t
->ListEntry
)
996 DbgPrint("%s:%d:Break in list (current %x next %x "
997 "current->next->previous %x)\n",
998 __FILE__
,__LINE__
,current
, current
->ListEntry
.Flink
,
999 current
->ListEntry
.Flink
->Blink
);
1000 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1003 current_entry
= current_entry
->Flink
;
1007 static void check_duplicates(HDR
* blk
)
1009 * FUNCTION: Check a block has no duplicates
1011 * blk = block to check
1012 * NOTE: Bug checks if duplicates are found
1015 ULONG_PTR base
= (ULONG_PTR
)blk
;
1016 ULONG_PTR last
= (ULONG_PTR
)blk
+ blk
->Size
;
1017 PLIST_ENTRY current_entry
;
1022 p
= avl_get_first(FreeBlockListRoot
);
1026 free
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
1027 if (free
->hdr
.Magic
!= BLOCK_HDR_FREE_MAGIC
)
1029 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
1031 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1034 if ( (ULONG_PTR
)free
> base
&& (ULONG_PTR
)free
< last
)
1036 DbgPrint("intersecting blocks on list\n");
1037 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1039 if ( (ULONG_PTR
)free
< base
&&
1040 ((ULONG_PTR
)free
+ free
->hdr
.Size
) > base
)
1042 DbgPrint("intersecting blocks on list\n");
1043 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1045 p
= avl_get_next(FreeBlockListRoot
, p
);
1048 current_entry
= UsedBlockListHead
.Flink
;
1049 while (current_entry
!= &UsedBlockListHead
)
1051 used
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
1053 if ( (ULONG_PTR
)used
> base
&& (ULONG_PTR
)used
< last
)
1055 DbgPrint("intersecting blocks on list\n");
1056 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1058 if ( (ULONG_PTR
)used
< base
&&
1059 ((ULONG_PTR
)used
+ used
->hdr
.Size
) > base
)
1061 DbgPrint("intersecting blocks on list\n");
1062 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1065 current_entry
= current_entry
->Flink
;
1070 static void validate_kernel_pool(void)
1072 * FUNCTION: Checks the integrity of the kernel memory heap
1077 PLIST_ENTRY current_entry
;
1080 validate_free_list();
1081 validate_used_list();
1083 p
= avl_get_first(FreeBlockListRoot
);
1086 free
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
1087 check_duplicates(&free
->hdr
);
1088 p
= avl_get_next(FreeBlockListRoot
, p
);
1090 current_entry
= UsedBlockListHead
.Flink
;
1091 while (current_entry
!= &UsedBlockListHead
)
1093 used
= CONTAINING_RECORD(current_entry
, HDR_USED
, ListEntry
);
1094 check_duplicates(&used
->hdr
);
1095 current_entry
= current_entry
->Flink
;
1102 free_pages(HDR_FREE
* blk
)
1107 start
= (ULONG_PTR
)blk
;
1108 end
= (ULONG_PTR
)blk
+ blk
->hdr
.Size
;
1111 * If the block doesn't contain a whole page then there is nothing to do
1113 if (PAGE_ROUND_UP(start
) >= PAGE_ROUND_DOWN(end
))
/*
 * Unlink a used block from UsedBlockListHead and subtract its size from
 * the used-pool byte counter (EiUsedNonPagedPool). The caller visible in
 * this file (ExFreeNonPagedPool) holds MmNpoolLock around this call.
 * NOTE(review): "¤t" below is HTML-entity mojibake ("&curren;") of
 * "&current" introduced by the extraction; braces and the EiNrUsedBlocks
 * bookkeeping line (orig. 1124) are also missing. Code text is kept
 * byte-identical -- restore from the upstream file.
 */
1120 static void remove_from_used_list(HDR_USED
* current
)
1122 RemoveEntryList(¤t
->ListEntry
);
1123 EiUsedNonPagedPool
-= current
->hdr
.Size
;
/*
 * Remove a free block from the size-ordered AVL tree of free blocks
 * (FreeBlockListRoot) and subtract its size from the free-pool byte
 * counter (EiFreeNonPagedPool).
 * NOTE(review): "¤t" below is HTML-entity mojibake of "&current";
 * braces, the EiNrFreeBlocks bookkeeping (orig. ~1134) and the #ifdef
 * guard around the tree dump (orig. 1136-1139) are missing from this
 * lossy extraction. Code text is kept byte-identical.
 */
1127 static void remove_from_free_list(HDR_FREE
* current
)
1129 DPRINT("remove_from_free_list %d\n", current
->hdr
.Size
);
1131 avl_remove(&FreeBlockListRoot
, ¤t
->Node
, compare_node
);
1133 EiFreeNonPagedPool
-= current
->hdr
.Size
;
1135 DPRINT("remove_from_free_list done\n");
/* Debug-only tree dump (its enclosing #ifdef is not visible here). */
1138 DumpFreeBlockTree();
1143 add_to_free_list(HDR_FREE
* blk
)
1145 * FUNCTION: add the block to the free list (internal)
1149 BOOL UpdatePrevPtr
= FALSE
;
1151 DPRINT("add_to_free_list (%d)\n", blk
->hdr
.Size
);
1155 current
= (HDR_FREE
*)blk
->hdr
.previous
;
1156 if (current
&& current
->hdr
.Magic
== BLOCK_HDR_FREE_MAGIC
)
1158 remove_from_free_list(current
);
1159 current
->hdr
.Size
= current
->hdr
.Size
+ blk
->hdr
.Size
;
1160 current
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1161 memset(blk
, 0xcc, HDR_USED_SIZE
);
1163 UpdatePrevPtr
= TRUE
;
1166 current
= (HDR_FREE
*)((ULONG_PTR
)blk
+ blk
->hdr
.Size
);
1167 if ((char*)current
< (char*)MiNonPagedPoolStart
+ MiNonPagedPoolLength
&&
1168 current
->hdr
.Magic
== BLOCK_HDR_FREE_MAGIC
)
1170 remove_from_free_list(current
);
1171 blk
->hdr
.Size
+= current
->hdr
.Size
;
1172 memset(current
, 0xcc, HDR_FREE_SIZE
);
1173 UpdatePrevPtr
= TRUE
;
1174 current
= (HDR_FREE
*)((ULONG_PTR
)blk
+ blk
->hdr
.Size
);
1176 if (UpdatePrevPtr
&&
1177 (char*)current
< (char*)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1179 current
->hdr
.previous
= &blk
->hdr
;
1181 DPRINT("%d\n", blk
->hdr
.Size
);
1182 blk
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1183 EiFreeNonPagedPool
+= blk
->hdr
.Size
;
1184 avl_insert(&FreeBlockListRoot
, &blk
->Node
, compare_node
);
1185 DPRINT("add_to_free_list done\n");
1188 DumpFreeBlockTree();
/*
 * Insert a used block at the head of UsedBlockListHead and charge its
 * size to the used-pool byte counter (EiUsedNonPagedPool).
 * NOTE(review): braces and the EiNrUsedBlocks increment (orig. ~1199) are
 * missing from this lossy extraction; code text below is kept
 * byte-identical.
 */
1192 static void add_to_used_list(HDR_USED
* blk
)
1194 * FUNCTION: add the block to the used list (internal)
1197 InsertHeadList(&UsedBlockListHead
, &blk
->ListEntry
);
1198 EiUsedNonPagedPool
+= blk
->hdr
.Size
;
1204 grow_block(HDR_FREE
* blk
, PVOID end
)
1208 ULONG_PTR StartIndex
, EndIndex
;
1211 StartIndex
= (ULONG_PTR
)(PAGE_ROUND_UP((ULONG_PTR
)blk
+ HDR_FREE_SIZE
- (ULONG_PTR
)MiNonPagedPoolStart
)) / PAGE_SIZE
;
1212 EndIndex
= ((ULONG_PTR
)PAGE_ROUND_UP(end
) - (ULONG_PTR
)MiNonPagedPoolStart
) / PAGE_SIZE
;
1215 for (i
= StartIndex
; i
< EndIndex
; i
++)
1217 if (!(MiNonPagedPoolAllocMap
[i
/ 32] & (1 << (i
% 32))))
1219 for (j
= i
+ 1; j
< EndIndex
&& j
- i
< 32; j
++)
1221 if (MiNonPagedPoolAllocMap
[j
/ 32] & (1 << (j
% 32)))
1226 for (k
= 0; k
< j
- i
; k
++)
1228 Status
= MmRequestPageMemoryConsumer(MC_NPPOOL
, FALSE
, &Page
[k
]);
1229 if (!NT_SUCCESS(Status
))
1231 for (i
= 0; i
< k
; i
++)
1233 MmReleasePageMemoryConsumer(MC_NPPOOL
, Page
[i
]);
1238 Status
= MmCreateVirtualMapping(NULL
,
1239 (PVOID
)((ULONG_PTR
)MiNonPagedPoolStart
+ i
* PAGE_SIZE
),
1240 PAGE_READWRITE
|PAGE_SYSTEM
,
1243 if (!NT_SUCCESS(Status
))
1245 for (i
= 0; i
< k
; i
++)
1247 MmReleasePageMemoryConsumer(MC_NPPOOL
, Page
[i
]);
1251 for (j
= i
; j
< k
+ i
; j
++)
1253 MiNonPagedPoolAllocMap
[j
/ 32] |= (1 << (j
% 32));
1255 MiNonPagedPoolNrOfPages
+= k
;
1262 static HDR_USED
* get_block(unsigned int size
, unsigned long alignment
)
1264 HDR_FREE
*blk
, *current
, *previous
= NULL
, *next
= NULL
, *best
= NULL
;
1265 ULONG previous_size
= 0, current_size
, next_size
= 0, new_size
;
1267 PVOID addr
, aligned_addr
, best_aligned_addr
=NULL
;
1270 DPRINT("get_block %d\n", size
);
1272 p
= avl_find_equal_or_greater(FreeBlockListRoot
, size
+ HDR_USED_SIZE
, compare_value
);
1275 current
= CONTAINING_RECORD(p
, HDR_FREE
, Node
);
1276 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1277 /* calculate first aligned address available within this block */
1278 aligned_addr
= alignment
> 0 ? MM_ROUND_UP(addr
, alignment
) : addr
;
1279 if (size
< PAGE_SIZE
)
1281 /* check that the block is in one page */
1282 if (PAGE_ROUND_DOWN(aligned_addr
) != PAGE_ROUND_DOWN((ULONG_PTR
)aligned_addr
+ size
- 1))
1284 aligned_addr
= (PVOID
)PAGE_ROUND_UP(aligned_addr
);
1287 DPRINT("%x %x\n", addr
, aligned_addr
);
1288 if (aligned_addr
!= addr
)
1290 while((ULONG_PTR
)aligned_addr
- (ULONG_PTR
)addr
< HDR_FREE_SIZE
)
1294 aligned_addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
+ HDR_FREE_SIZE
);
1298 aligned_addr
= MM_ROUND_UP((PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
+ HDR_FREE_SIZE
), alignment
);
1300 if (size
< PAGE_SIZE
)
1302 /* check that the block is in one page */
1303 if (PAGE_ROUND_DOWN(aligned_addr
) != PAGE_ROUND_DOWN((ULONG_PTR
)aligned_addr
+ size
- 1))
1305 aligned_addr
= (PVOID
)PAGE_ROUND_UP(aligned_addr
);
1310 DPRINT("%x %x\n", addr
, aligned_addr
);
1311 new_size
= (ULONG_PTR
)aligned_addr
- (ULONG_PTR
)addr
+ size
;
1312 if (current
->hdr
.Size
>= new_size
+ HDR_USED_SIZE
&&
1313 (best
== NULL
|| current
->hdr
.Size
< best
->hdr
.Size
))
1316 best_aligned_addr
= aligned_addr
;
1317 if (new_size
<= size
+ 2 * HDR_FREE_SIZE
)
1325 if (size
< PAGE_SIZE
)
1327 if (current
->hdr
.Size
>= 2 * PAGE_SIZE
+ HDR_FREE_SIZE
)
1334 if (current
->hdr
.Size
>= size
+ alignment
+ HDR_FREE_SIZE
)
1340 p
= avl_get_next(FreeBlockListRoot
, p
);
1343 * We didn't find anything suitable at all.
1350 DPRINT(":: blk %x blk->hdr.Size %d (%d) Size %d\n", best
, best
->hdr
.Size
, best
->hdr
.Size
- HDR_USED_SIZE
, size
);
1353 current_size
= current
->hdr
.Size
- HDR_USED_SIZE
;
1354 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1355 if (addr
!= best_aligned_addr
)
1357 blk
= (HDR_FREE
*)((ULONG_PTR
)best_aligned_addr
- HDR_USED_SIZE
);
1359 * if size-aligned, break off the preceding bytes into their own block...
1362 previous_size
= (ULONG_PTR
)blk
- (ULONG_PTR
)previous
- HDR_FREE_SIZE
;
1364 current_size
-= ((ULONG_PTR
)current
- (ULONG_PTR
)previous
);
1366 end
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
+ size
);
1368 if (current_size
>= size
+ HDR_FREE_SIZE
+ MM_POOL_ALIGNMENT
)
1370 /* create a new free block after our block, if the memory size is >= 4 byte for this block */
1371 next
= (HDR_FREE
*)((ULONG_PTR
)current
+ size
+ HDR_USED_SIZE
);
1372 next_size
= current_size
- size
- HDR_FREE_SIZE
;
1373 current_size
= size
;
1374 end
= (PVOID
)((ULONG_PTR
)next
+ HDR_FREE_SIZE
);
1379 remove_from_free_list(previous
);
1380 if (!grow_block(previous
, end
))
1382 add_to_free_list(previous
);
1385 memset(current
, 0, HDR_USED_SIZE
);
1386 current
->hdr
.Size
= current_size
+ HDR_USED_SIZE
;
1387 current
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1388 current
->hdr
.previous
= &previous
->hdr
;
1389 previous
->hdr
.Size
= previous_size
+ HDR_FREE_SIZE
;
1392 blk
= (HDR_FREE
*)((ULONG_PTR
)current
+ current
->hdr
.Size
);
1393 if ((ULONG_PTR
)blk
< (ULONG_PTR
)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1395 blk
->hdr
.previous
= ¤t
->hdr
;
1399 add_to_free_list(previous
);
1403 remove_from_free_list(current
);
1405 if (!grow_block(current
, end
))
1407 add_to_free_list(current
);
1410 current
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1413 current
->hdr
.Size
= current_size
+ HDR_USED_SIZE
;
1419 memset(next
, 0, HDR_FREE_SIZE
);
1420 next
->hdr
.Size
= next_size
+ HDR_FREE_SIZE
;
1421 next
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1422 next
->hdr
.previous
= ¤t
->hdr
;
1423 blk
= (HDR_FREE
*)((ULONG_PTR
)next
+ next
->hdr
.Size
);
1424 if ((ULONG_PTR
)blk
< (ULONG_PTR
)MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1426 blk
->hdr
.previous
= &next
->hdr
;
1428 add_to_free_list(next
);
1431 add_to_used_list((HDR_USED
*)current
);
1434 if (size
< PAGE_SIZE
)
1436 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1437 if (PAGE_ROUND_DOWN(addr
) != PAGE_ROUND_DOWN((PVOID
)((ULONG_PTR
)addr
+ size
- 1)))
1439 DPRINT1("%x %x\n", addr
, (ULONG_PTR
)addr
+ size
);
1441 ASSERT (PAGE_ROUND_DOWN(addr
) == PAGE_ROUND_DOWN((PVOID
)((ULONG_PTR
)addr
+ size
- 1)));
1445 addr
= (PVOID
)((ULONG_PTR
)current
+ HDR_USED_SIZE
);
1446 ASSERT(MM_ROUND_UP(addr
, alignment
) == addr
);
1448 return (HDR_USED
*)current
;
/*
 * Query the pool tag of a live nonpaged-pool allocation: step back from
 * the caller-visible address to the HDR_USED header and validate its
 * magic before trusting it.
 * NOTE(review): the return-type line (orig. 1451), the failure branch
 * (likely a bugcheck), the "return blk->Tag" tail and closing brace are
 * missing from this lossy extraction; code text below is kept
 * byte-identical.
 */
1452 ExRosQueryNonPagedPoolTag ( PVOID Addr
)
1454 HDR_USED
* blk
=(HDR_USED
*)((ULONG_PTR
)Addr
- HDR_USED_SIZE
);
/* Wrong magic means Addr is not a live pool allocation (or the pool is
   corrupt); the handling branch is not visible in this extraction. */
1455 if (blk
->hdr
.Magic
!= BLOCK_HDR_USED_MAGIC
)
1461 VOID STDCALL
ExFreeNonPagedPool (PVOID block
)
1463 * FUNCTION: Releases previously allocated memory
1465 * block = block to free
1468 HDR_USED
* blk
=(HDR_USED
*)((ULONG_PTR
)block
- HDR_USED_SIZE
);
1476 DPRINT("freeing block %x\n",blk
);
1478 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block
,blk
->hdr
.Size
,
1480 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
1483 if (blk
->hdr
.Magic
!= BLOCK_HDR_USED_MAGIC
)
1485 if (blk
->hdr
.Magic
== BLOCK_HDR_FREE_MAGIC
)
1487 DbgPrint("ExFreePool of already freed address %x\n", block
);
1491 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1492 block
, blk
->hdr
.Magic
);
1497 memset(block
, 0xcc, blk
->hdr
.Size
- HDR_USED_SIZE
);
1498 #ifdef TAG_STATISTICS_TRACKING
1500 MiRemoveFromTagHashTable(blk
);
1503 remove_from_used_list(blk
);
1504 blk
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1505 add_to_free_list((HDR_FREE
*)blk
);
1507 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1511 ExAllocateNonPagedPoolWithTag(POOL_TYPE Type
, ULONG Size
, ULONG Tag
, PVOID Caller
)
1514 HDR_USED
* best
= NULL
;
1518 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1521 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
1526 /* after some allocations print the npaged pool stats */
1527 #ifdef TAG_STATISTICS_TRACKING
1530 static ULONG counter
= 0;
1531 if (counter
++ % 100000 == 0)
1533 MiDebugDumpNonPagedPoolStats(FALSE
);
1539 * accomodate this useful idiom
1543 POOL_TRACE("= NULL\n");
1544 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1547 /* Make the size dword alligned, this makes the block dword alligned */
1548 Size
= ROUND_UP(Size
, MM_POOL_ALIGNMENT
);
1550 if (Size
>= PAGE_SIZE
)
1552 alignment
= PAGE_SIZE
;
1554 else if (Type
& CACHE_ALIGNED_POOL_MASK
)
1556 alignment
= MM_CACHE_LINE_SIZE
;
1563 best
= get_block(Size
, alignment
);
1566 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1567 DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
1572 best
->Caller
= Caller
;
1573 best
->Dumped
= FALSE
;
1574 best
->TagListEntry
.Flink
= best
->TagListEntry
.Blink
= NULL
;
1575 #ifdef TAG_STATISTICS_TRACKING
1577 MiAddToTagHashTable(best
);
1580 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1581 block
= (PVOID
)((ULONG_PTR
)best
+ HDR_USED_SIZE
);
1582 /* RtlZeroMemory(block, Size);*/
1589 MiInitializeNonPagedPool(VOID
)
1597 #ifdef TAG_STATISTICS_TRACKING
1599 for (i
= 0; i
< TAG_HASH_TABLE_SIZE
; i
++)
1601 InitializeListHead(&tag_hash_table
[i
]);
1604 KeInitializeSpinLock(&MmNpoolLock
);
1605 InitializeListHead(&UsedBlockListHead
);
1606 InitializeListHead(&AddressListHead
);
1607 FreeBlockListRoot
= NULL
;
1609 MiNonPagedPoolAllocMap
= (PVOID
)((ULONG_PTR
)MiNonPagedPoolStart
+ PAGE_SIZE
);
1610 MiNonPagedPoolNrOfPages
= PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
, 32) / 8 + HDR_FREE_SIZE
) + PAGE_SIZE
;
1611 MiNonPagedPoolNrOfPages
/= PAGE_SIZE
;
1612 Address
= MiNonPagedPoolStart
;
1614 for (i
= 0; i
< MiNonPagedPoolNrOfPages
; i
++)
1616 Status
= MmRequestPageMemoryConsumer(MC_NPPOOL
, FALSE
, &Page
);
1617 if (!NT_SUCCESS(Status
))
1619 DbgPrint("Unable to allocate a page\n");
1623 Status
= MmCreateVirtualMapping(NULL
,
1625 PAGE_READWRITE
|PAGE_SYSTEM
,
1628 if (!NT_SUCCESS(Status
))
1630 DbgPrint("Unable to create virtual mapping\n");
1633 Address
= (PVOID
)((ULONG_PTR
)Address
+ PAGE_SIZE
);
1636 for (i
= 0; i
< MiNonPagedPoolNrOfPages
; i
++)
1638 MiNonPagedPoolAllocMap
[i
/ 32] |= (1 << (i
% 32));
1641 /* the first block is free */
1642 free
= (HDR_FREE
*)MiNonPagedPoolStart
;
1643 free
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1644 free
->hdr
.Size
= PAGE_SIZE
- HDR_USED_SIZE
;
1645 free
->hdr
.previous
= NULL
;
1646 memset((PVOID
)((ULONG_PTR
)free
+ HDR_FREE_SIZE
), 0xcc, free
->hdr
.Size
- HDR_FREE_SIZE
);
1647 add_to_free_list(free
);
1649 /* the second block contains the non paged pool bitmap */
1650 used
= (HDR_USED
*)((ULONG_PTR
)free
+ free
->hdr
.Size
);
1651 used
->hdr
.Magic
= BLOCK_HDR_USED_MAGIC
;
1652 used
->hdr
.Size
= ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
, 32) / 8 + HDR_USED_SIZE
;
1653 used
->hdr
.previous
= &free
->hdr
;
1654 used
->Tag
= 0xffffffff;
1656 used
->Dumped
= FALSE
;
1657 add_to_used_list(used
);
1658 #ifdef TAG_STATISTICS_TRACKING
1659 MiAddToTagHashTable(used
);
1662 /* the third block is the free block after the bitmap */
1663 free
= (HDR_FREE
*)((ULONG_PTR
)used
+ used
->hdr
.Size
);
1664 free
->hdr
.Magic
= BLOCK_HDR_FREE_MAGIC
;
1665 free
->hdr
.Size
= MiNonPagedPoolLength
- ((ULONG_PTR
)free
- (ULONG_PTR
)MiNonPagedPoolStart
);
1666 free
->hdr
.previous
= &used
->hdr
;
1667 memset((PVOID
)((ULONG_PTR
)free
+ HDR_FREE_SIZE
), 0x0cc, (ULONG_PTR
)Address
- (ULONG_PTR
)free
- HDR_FREE_SIZE
);
1668 add_to_free_list(free
);
1673 MiAllocateSpecialPool (IN POOL_TYPE PoolType
,
1674 IN SIZE_T NumberOfBytes
,
1679 /* FIXME: Special Pools not Supported */
1680 DbgPrint("Special Pools not supported\n");