1 /* $Id: npool.c,v 1.78 2003/12/13 21:11:53 gvg Exp $
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
17 /* INCLUDES ****************************************************************/
19 #include <ddk/ntddk.h>
20 #include <internal/mm.h>
21 #include <internal/ntoskrnl.h>
22 #include <internal/pool.h>
25 #include <internal/debug.h>
27 /* Enable strict checking of the nonpaged pool on every allocation */
28 /*#define ENABLE_VALIDATE_POOL*/
30 /* Enable tracking of statistics about the tagged blocks in the pool */
31 #define TAG_STATISTICS_TRACKING
34 * Put each block in its own range of pages and position the block at the
35 * end of the range so any accesses beyond the end of block are to invalid
38 /*#define WHOLE_PAGE_ALLOCATIONS*/
40 #ifdef ENABLE_VALIDATE_POOL
41 #define VALIDATE_POOL validate_kernel_pool()
47 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
49 #define POOL_TRACE(args...)
52 /* avl types ****************************************************************/
55 * This declarations should be moved into a separate header file.
/*
 * Node of the AVL tree used to index free blocks by size.
 * link[0]/link[1] are the left and right children; parent and balance
 * support the parent-pointer AVL (pavl) rebalancing performed by
 * avl_insert()/avl_remove() below.
 */
typedef struct _NODE
{
   struct _NODE* link[2];   /* left (0) and right (1) children */
   struct _NODE* parent;    /* parent node, NULL for the root */
   signed char balance;     /* AVL balance factor; -2/+2 transiently during rebalance */
}
NODE, *PNODE;
65 /* TYPES *******************************************************************/
67 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
68 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
71 * fields present at the start of a block (this is for internal use only)
73 typedef struct _BLOCK_HDR
77 struct _BLOCK_HDR
* previous
;
85 LIST_ENTRY TagListEntry
;
95 #define BLOCK_HDR_SIZE ROUND_UP(sizeof(BLOCK_HDR), MM_POOL_ALIGNMENT)
98 ExAllocateWholePageBlock(ULONG Size
);
100 ExFreeWholePageBlock(PVOID Addr
);
102 /* GLOBALS *****************************************************************/
104 extern PVOID MiNonPagedPoolStart
;
105 extern ULONG MiNonPagedPoolLength
;
108 * Head of the list of free blocks
110 static PNODE FreeBlockListRoot
= NULL
;
113 * Head of the list of in use block
115 static LIST_ENTRY UsedBlockListHead
;
117 static LIST_ENTRY AddressListHead
;
119 #ifndef WHOLE_PAGE_ALLOCATIONS
121 * Count of free blocks
123 static ULONG EiNrFreeBlocks
= 0;
126 * Count of used blocks
128 static ULONG EiNrUsedBlocks
= 0;
132 * Lock that protects the non-paged pool data structures
134 static KSPIN_LOCK MmNpoolLock
;
137 * Total memory used for free nonpaged pool blocks
139 ULONG EiFreeNonPagedPool
= 0;
142 * Total memory used for nonpaged pool blocks
144 ULONG EiUsedNonPagedPool
= 0;
147 * Allocate a range of memory in the nonpaged pool
150 MiAllocNonPagedPoolRegion(unsigned int nr_pages
);
153 MiFreeNonPagedPoolRegion(PVOID Addr
, ULONG Count
, BOOLEAN Free
);
155 #ifdef TAG_STATISTICS_TRACKING
156 #define TAG_HASH_TABLE_SIZE (1024)
157 static LIST_ENTRY tag_hash_table
[TAG_HASH_TABLE_SIZE
];
158 #endif /* TAG_STATISTICS_TRACKING */
160 #ifdef WHOLE_PAGE_ALLOCATIONS
161 static RTL_BITMAP NonPagedPoolAllocMap
;
162 static ULONG NonPagedPoolAllocMapHint
;
163 static ULONG MiCurrentNonPagedPoolLength
= 0;
165 static PULONG MiNonPagedPoolAllocMap
;
166 static ULONG MiNonPagedPoolNrOfPages
;
167 #endif /* WHOLE_PAGE_ALLOCATIONS */
169 /* avl helper functions ****************************************************/
171 void DumpFreeBlockNode(PNODE p
)
173 static int count
= 0;
180 DumpFreeBlockNode(p
->link
[0]);
181 blk
= CONTAINING_RECORD(p
, BLOCK_HDR
, Free
.Node
);
182 DbgPrint("%08x %8d (%d)\n", blk
, blk
->Size
, count
);
183 DumpFreeBlockNode(p
->link
[1]);
188 void DumpFreeBlockTree(void)
190 DbgPrint("--- Begin tree ------------------\n");
191 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot
, BLOCK_HDR
, Free
.Node
));
192 DumpFreeBlockNode(FreeBlockListRoot
);
193 DbgPrint("--- End tree --------------------\n");
196 int compare_node(PNODE p1
, PNODE p2
)
198 BLOCK_HDR
* blk1
= CONTAINING_RECORD(p1
, BLOCK_HDR
, Free
.Node
);
199 BLOCK_HDR
* blk2
= CONTAINING_RECORD(p2
, BLOCK_HDR
, Free
.Node
);
201 if (blk1
->Size
== blk2
->Size
)
214 if (blk1
->Size
< blk2
->Size
)
218 if (blk1
->Size
> blk2
->Size
)
227 int compare_value(PVOID value
, PNODE p
)
229 BLOCK_HDR
* blk
= CONTAINING_RECORD(p
, BLOCK_HDR
, Free
.Node
);
230 ULONG v
= *(PULONG
)value
;
243 /* avl functions **********************************************************/
246 * The avl functions should be moved into a separate file.
249 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
251 void avl_insert (PNODE
* root
, PNODE n
, int (*compare
)(PNODE
, PNODE
))
253 PNODE y
; /* Top node to update balance factor, and parent. */
254 PNODE p
, q
; /* Iterator, and parent. */
255 PNODE w
; /* New root of rebalanced subtree. */
256 int dir
; /* Direction to descend. */
258 n
->link
[0] = n
->link
[1] = n
->parent
= NULL
;
262 for (q
= NULL
, p
= *root
; p
!= NULL
; q
= p
, p
= p
->link
[dir
])
264 dir
= compare(n
, p
) > 0;
286 for (p
= n
; p
!= y
; p
= q
)
289 dir
= q
->link
[0] != p
;
300 if (y
->balance
== -2)
302 PNODE x
= y
->link
[0];
303 if (x
->balance
== -1)
306 y
->link
[0] = x
->link
[1];
308 x
->balance
= y
->balance
= 0;
309 x
->parent
= y
->parent
;
311 if (y
->link
[0] != NULL
)
313 y
->link
[0]->parent
= y
;
318 assert (x
->balance
== +1);
320 x
->link
[1] = w
->link
[0];
322 y
->link
[0] = w
->link
[1];
324 if (w
->balance
== -1)
329 else if (w
->balance
== 0)
331 x
->balance
= y
->balance
= 0;
333 else /* |w->pavl_balance == +1| */
339 w
->parent
= y
->parent
;
340 x
->parent
= y
->parent
= w
;
341 if (x
->link
[1] != NULL
)
343 x
->link
[1]->parent
= x
;
345 if (y
->link
[0] != NULL
)
347 y
->link
[0]->parent
= y
;
351 else if (y
->balance
== +2)
353 PNODE x
= y
->link
[1];
354 if (x
->balance
== +1)
357 y
->link
[1] = x
->link
[0];
359 x
->balance
= y
->balance
= 0;
360 x
->parent
= y
->parent
;
362 if (y
->link
[1] != NULL
)
364 y
->link
[1]->parent
= y
;
369 assert (x
->balance
== -1);
371 x
->link
[0] = w
->link
[1];
373 y
->link
[1] = w
->link
[0];
380 else if (w
->balance
== 0)
382 x
->balance
= y
->balance
= 0;
384 else /* |w->pavl_balance == -1| */
390 w
->parent
= y
->parent
;
391 x
->parent
= y
->parent
= w
;
392 if (x
->link
[0] != NULL
)
394 x
->link
[0]->parent
= x
;
396 if (y
->link
[1] != NULL
)
398 y
->link
[1]->parent
= y
;
406 if (w
->parent
!= NULL
)
408 w
->parent
->link
[y
!= w
->parent
->link
[0]] = w
;
418 void avl_remove (PNODE
*root
, PNODE item
, int (*compare
)(PNODE
, PNODE
))
420 PNODE p
; /* Traverses tree to find node to delete. */
421 PNODE q
; /* Parent of |p|. */
422 int dir
; /* Side of |q| on which |p| is linked. */
424 if (root
== NULL
|| *root
== NULL
)
438 dir
= compare(p
, q
) > 0;
441 if (p
->link
[1] == NULL
)
443 q
->link
[dir
] = p
->link
[0];
444 if (q
->link
[dir
] != NULL
)
446 q
->link
[dir
]->parent
= p
->parent
;
451 PNODE r
= p
->link
[1];
452 if (r
->link
[0] == NULL
)
454 r
->link
[0] = p
->link
[0];
456 r
->parent
= p
->parent
;
457 if (r
->link
[0] != NULL
)
459 r
->link
[0]->parent
= r
;
461 r
->balance
= p
->balance
;
467 PNODE s
= r
->link
[0];
468 while (s
->link
[0] != NULL
)
473 r
->link
[0] = s
->link
[1];
474 s
->link
[0] = p
->link
[0];
475 s
->link
[1] = p
->link
[1];
477 if (s
->link
[0] != NULL
)
479 s
->link
[0]->parent
= s
;
481 s
->link
[1]->parent
= s
;
482 s
->parent
= p
->parent
;
483 if (r
->link
[0] != NULL
)
485 r
->link
[0]->parent
= r
;
487 s
->balance
= p
->balance
;
493 item
->link
[0] = item
->link
[1] = item
->parent
= NULL
;
496 while (q
!= (PNODE
) root
)
500 if (y
->parent
!= NULL
)
511 dir
= q
->link
[0] != y
;
513 if (y
->balance
== +1)
517 else if (y
->balance
== +2)
519 PNODE x
= y
->link
[1];
520 if (x
->balance
== -1)
524 assert (x
->balance
== -1);
526 x
->link
[0] = w
->link
[1];
528 y
->link
[1] = w
->link
[0];
530 if (w
->balance
== +1)
535 else if (w
->balance
== 0)
537 x
->balance
= y
->balance
= 0;
539 else /* |w->pavl_balance == -1| */
545 w
->parent
= y
->parent
;
546 x
->parent
= y
->parent
= w
;
547 if (x
->link
[0] != NULL
)
549 x
->link
[0]->parent
= x
;
551 if (y
->link
[1] != NULL
)
553 y
->link
[1]->parent
= y
;
559 y
->link
[1] = x
->link
[0];
561 x
->parent
= y
->parent
;
563 if (y
->link
[1] != NULL
)
565 y
->link
[1]->parent
= y
;
576 x
->balance
= y
->balance
= 0;
584 dir
= q
->link
[0] != y
;
586 if (y
->balance
== -1)
590 else if (y
->balance
== -2)
592 PNODE x
= y
->link
[0];
593 if (x
->balance
== +1)
596 assert (x
->balance
== +1);
598 x
->link
[1] = w
->link
[0];
600 y
->link
[0] = w
->link
[1];
602 if (w
->balance
== -1)
607 else if (w
->balance
== 0)
609 x
->balance
= y
->balance
= 0;
611 else /* |w->pavl_balance == +1| */
617 w
->parent
= y
->parent
;
618 x
->parent
= y
->parent
= w
;
619 if (x
->link
[1] != NULL
)
621 x
->link
[1]->parent
= x
;
623 if (y
->link
[0] != NULL
)
625 y
->link
[0]->parent
= y
;
631 y
->link
[0] = x
->link
[1];
633 x
->parent
= y
->parent
;
635 if (y
->link
[0] != NULL
)
637 y
->link
[0]->parent
= y
;
648 x
->balance
= y
->balance
= 0;
658 PNODE _cdecl
avl_get_first(PNODE root
)
673 PNODE
avl_get_next(PNODE root
, PNODE p
)
688 while (q
&& q
->link
[1] == p
)
704 PNODE
avl_find_equal_or_greater(PNODE root
, ULONG size
, int (compare
)(PVOID
, PNODE
))
710 for (p
= root
; p
!= NULL
;)
712 cmp
= compare((PVOID
)&size
, p
);
726 cmp
= compare((PVOID
)&size
, p
->link
[0]);
739 /* non paged pool functions ************************************************/
741 #ifdef TAG_STATISTICS_TRACKING
743 MiRemoveFromTagHashTable(BLOCK_HDR
* block
)
745 * Remove a block from the tag hash table
748 if (block
->Used
.Tag
== 0)
753 RemoveEntryList(&block
->Used
.TagListEntry
);
757 MiAddToTagHashTable(BLOCK_HDR
* block
)
759 * Add a block to the tag hash table
764 if (block
->Used
.Tag
== 0)
769 hash
= block
->Used
.Tag
% TAG_HASH_TABLE_SIZE
;
771 InsertHeadList(&tag_hash_table
[hash
], &block
->Used
.TagListEntry
);
773 #endif /* TAG_STATISTICS_TRACKING */
775 #if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
777 MiDumpTagStats(ULONG CurrentTag
, ULONG CurrentNrBlocks
, ULONG CurrentSize
)
781 c1
= (CurrentTag
>> 24) & 0xFF;
782 c2
= (CurrentTag
>> 16) & 0xFF;
783 c3
= (CurrentTag
>> 8) & 0xFF;
784 c4
= CurrentTag
& 0xFF;
786 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
788 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
789 CurrentTag
, c4
, c3
, c2
, c1
, CurrentNrBlocks
,
790 CurrentSize
, CurrentSize
/ CurrentNrBlocks
);
794 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
795 CurrentTag
, CurrentNrBlocks
, CurrentSize
,
796 CurrentSize
/ CurrentNrBlocks
);
799 #endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS); */
802 MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly
)
804 #if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
808 ULONG CurrentNrBlocks
;
813 LIST_ENTRY tmpListHead
;
814 PLIST_ENTRY current_entry
;
816 DbgPrint("******* Dumping non paging pool stats ******\n");
819 for (i
= 0; i
< TAG_HASH_TABLE_SIZE
; i
++)
821 InitializeListHead(&tmpListHead
);
823 while (!IsListEmpty(&tag_hash_table
[i
]))
827 current_entry
= tag_hash_table
[i
].Flink
;
828 while (current_entry
!= &tag_hash_table
[i
])
830 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, Used
.TagListEntry
);
831 current_entry
= current_entry
->Flink
;
834 CurrentTag
= current
->Used
.Tag
;
838 if (current
->Used
.Tag
== CurrentTag
)
840 RemoveEntryList(¤t
->Used
.TagListEntry
);
841 InsertHeadList(&tmpListHead
, ¤t
->Used
.TagListEntry
);
842 if (!NewOnly
|| !current
->Used
.Dumped
)
846 CurrentSize
+= current
->Size
;
847 TotalSize
+= current
->Size
;
848 current
->Used
.Dumped
= TRUE
;
852 if (CurrentTag
!= 0 && CurrentNrBlocks
!= 0)
854 MiDumpTagStats(CurrentTag
, CurrentNrBlocks
, CurrentSize
);
857 if (!IsListEmpty(&tmpListHead
))
859 tag_hash_table
[i
].Flink
= tmpListHead
.Flink
;
860 tag_hash_table
[i
].Flink
->Blink
= &tag_hash_table
[i
];
861 tag_hash_table
[i
].Blink
= tmpListHead
.Blink
;
862 tag_hash_table
[i
].Blink
->Flink
= &tag_hash_table
[i
];
865 if (TotalBlocks
!= 0)
867 DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
868 TotalBlocks
, TotalSize
, TotalSize
/ TotalBlocks
);
872 DbgPrint("TotalBlocks %d TotalSize %d\n",
873 TotalBlocks
, TotalSize
);
875 Size
= EiFreeNonPagedPool
- (MiNonPagedPoolLength
- MiNonPagedPoolNrOfPages
* PAGE_SIZE
);
876 DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
877 EiNrFreeBlocks
, Size
, EiNrFreeBlocks
? Size
/ EiNrFreeBlocks
: 0);
878 DbgPrint("***************** Dump Complete ***************\n");
879 #endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
883 MiDebugDumpNonPagedPool(BOOLEAN NewOnly
)
885 #ifndef WHOLE_PAGE_ALLOCATIONS
887 PLIST_ENTRY current_entry
;
890 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
892 DbgPrint("******* Dumping non paging pool contents ******\n");
893 current_entry
= UsedBlockListHead
.Flink
;
894 while (current_entry
!= &UsedBlockListHead
)
896 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, Used
.ListEntry
);
897 if (!NewOnly
|| !current
->Used
.Dumped
)
901 c1
= (current
->Used
.Tag
>> 24) & 0xFF;
902 c2
= (current
->Used
.Tag
>> 16) & 0xFF;
903 c3
= (current
->Used
.Tag
>> 8) & 0xFF;
904 c4
= current
->Used
.Tag
& 0xFF;
906 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
908 DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
909 current
->Size
, current
->Used
.Tag
, c4
, c3
, c2
, c1
,
910 current
->Used
.Caller
);
914 DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
915 current
->Size
, current
->Used
.Tag
, current
->Used
.Caller
);
917 current
->Used
.Dumped
= TRUE
;
919 current_entry
= current_entry
->Flink
;
921 DbgPrint("***************** Dump Complete ***************\n");
922 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
923 #endif /* not WHOLE_PAGE_ALLOCATIONS */
926 #ifndef WHOLE_PAGE_ALLOCATIONS
928 #ifdef ENABLE_VALIDATE_POOL
929 static void validate_free_list(void)
931 * FUNCTION: Validate the integrity of the list of free blocks
935 unsigned int blocks_seen
=0;
938 p
= avl_get_first(FreeBlockListRoot
);
944 current
= CONTAINING_RECORD(p
, BLOCK_HDR
, Free
.Node
);
945 base_addr
= (PVOID
)current
;
947 if (current
->Magic
!= BLOCK_HDR_FREE_MAGIC
)
949 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
951 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
954 if (base_addr
< MiNonPagedPoolStart
||
955 base_addr
+ BLOCK_HDR_SIZE
+ current
->Size
> MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
957 DbgPrint("Block %x found outside pool area\n",current
);
958 DbgPrint("Size %d\n",current
->Size
);
959 DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart
,
960 MiNonPagedPoolStart
+MiNonPagedPoolLength
);
961 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
964 if (blocks_seen
> EiNrFreeBlocks
)
966 DbgPrint("Too many blocks on free list\n");
967 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
969 p
= avl_get_next(FreeBlockListRoot
, p
);
973 static void validate_used_list(void)
975 * FUNCTION: Validate the integrity of the list of used blocks
979 PLIST_ENTRY current_entry
;
980 unsigned int blocks_seen
=0;
982 current_entry
= UsedBlockListHead
.Flink
;
983 while (current_entry
!= &UsedBlockListHead
)
987 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, Used
.ListEntry
);
988 base_addr
= (PVOID
)current
;
990 if (current
->Magic
!= BLOCK_HDR_USED_MAGIC
)
992 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
994 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
996 if (base_addr
< MiNonPagedPoolStart
||
997 (base_addr
+BLOCK_HDR_SIZE
+current
->Size
) >
998 MiNonPagedPoolStart
+MiNonPagedPoolLength
)
1000 DbgPrint("Block %x found outside pool area\n",current
);
1001 DbgPrint("Size %d\n",current
->Size
);
1002 DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart
,
1003 MiNonPagedPoolStart
+MiNonPagedPoolLength
);
1004 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1007 if (blocks_seen
> EiNrUsedBlocks
)
1009 DbgPrint("Too many blocks on used list\n");
1010 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1012 if (current
->Used
.ListEntry
.Flink
!= &UsedBlockListHead
&&
1013 current
->Used
.ListEntry
.Flink
->Blink
!= ¤t
->Used
.ListEntry
)
1015 DbgPrint("%s:%d:Break in list (current %x next %x "
1016 "current->next->previous %x)\n",
1017 __FILE__
,__LINE__
,current
, current
->Used
.ListEntry
.Flink
,
1018 current
->Used
.ListEntry
.Flink
->Blink
);
1019 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1022 current_entry
= current_entry
->Flink
;
1026 static void check_duplicates(BLOCK_HDR
* blk
)
1028 * FUNCTION: Check a block has no duplicates
1030 * blk = block to check
1031 * NOTE: Bug checks if duplicates are found
1034 char* base
= (char*)blk
;
1035 char* last
= ((char*)blk
) + BLOCK_HDR_SIZE
+ blk
->Size
;
1037 PLIST_ENTRY current_entry
;
1040 p
= avl_get_first(FreeBlockListRoot
);
1044 current
= CONTAINING_RECORD(p
, BLOCK_HDR
, Free
.Node
);
1046 if (current
->Magic
!= BLOCK_HDR_FREE_MAGIC
)
1048 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
1050 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1053 if ( (char*)current
> base
&& (char*)current
< last
)
1055 DbgPrint("intersecting blocks on list\n");
1056 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1058 if ( (char*)current
< base
&&
1059 ((char*)current
+ current
->Size
+ BLOCK_HDR_SIZE
)
1062 DbgPrint("intersecting blocks on list\n");
1063 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1065 p
= avl_get_next(FreeBlockListRoot
, p
);
1068 current_entry
= UsedBlockListHead
.Flink
;
1069 while (current_entry
!= &UsedBlockListHead
)
1071 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, Used
.ListEntry
);
1073 if ( (char*)current
> base
&& (char*)current
< last
)
1075 DbgPrint("intersecting blocks on list\n");
1076 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1078 if ( (char*)current
< base
&&
1079 ((char*)current
+ current
->Size
+ BLOCK_HDR_SIZE
)
1082 DbgPrint("intersecting blocks on list\n");
1083 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1086 current_entry
= current_entry
->Flink
;
1091 static void validate_kernel_pool(void)
1093 * FUNCTION: Checks the integrity of the kernel memory heap
1097 PLIST_ENTRY current_entry
;
1100 validate_free_list();
1101 validate_used_list();
1103 p
= avl_get_first(FreeBlockListRoot
);
1106 current
= CONTAINING_RECORD(p
, BLOCK_HDR
, Free
.Node
);
1107 check_duplicates(current
);
1108 p
= avl_get_next(FreeBlockListRoot
, p
);
1110 current_entry
= UsedBlockListHead
.Flink
;
1111 while (current_entry
!= &UsedBlockListHead
)
1113 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, Used
.ListEntry
);
1114 check_duplicates(current
);
1115 current_entry
= current_entry
->Flink
;
1122 free_pages(BLOCK_HDR
* blk
)
1129 end
= (ULONG
)blk
+ BLOCK_HDR_SIZE
+ blk
->Size
;
1132 * If the block doesn't contain a whole page then there is nothing to do
1134 if (PAGE_ROUND_UP(start
) >= PAGE_ROUND_DOWN(end
))
1141 static void remove_from_used_list(BLOCK_HDR
* current
)
1143 RemoveEntryList(¤t
->Used
.ListEntry
);
1144 EiUsedNonPagedPool
-= current
->Size
;
1148 static void remove_from_free_list(BLOCK_HDR
* current
)
1150 DPRINT("remove_from_free_list %d\n", current
->Size
);
1152 avl_remove(&FreeBlockListRoot
, ¤t
->Free
.Node
, compare_node
);
1154 EiFreeNonPagedPool
-= current
->Size
;
1156 DPRINT("remove_from_free_list done\n");
1158 DumpFreeBlockTree();
1163 add_to_free_list(BLOCK_HDR
* blk
)
1165 * FUNCTION: add the block to the free list (internal)
1170 DPRINT("add_to_free_list (%d)\n", blk
->Size
);
1174 current
= blk
->previous
;
1175 if (current
&& current
->Magic
== BLOCK_HDR_FREE_MAGIC
)
1177 remove_from_free_list(current
);
1178 current
->Size
= current
->Size
+ BLOCK_HDR_SIZE
+ blk
->Size
;
1179 current
->Magic
= BLOCK_HDR_USED_MAGIC
;
1180 memset(blk
, 0xcc, BLOCK_HDR_SIZE
);
1184 current
= (BLOCK_HDR
*)((char*)blk
+ BLOCK_HDR_SIZE
+ blk
->Size
);
1185 if ((PVOID
)current
< MiNonPagedPoolStart
+ MiNonPagedPoolLength
&&
1186 current
->Magic
== BLOCK_HDR_FREE_MAGIC
)
1188 remove_from_free_list(current
);
1189 blk
->Size
+= BLOCK_HDR_SIZE
+ current
->Size
;
1190 memset(current
, 0xcc, BLOCK_HDR_SIZE
);
1191 current
= (BLOCK_HDR
*)((char*)blk
+ BLOCK_HDR_SIZE
+ blk
->Size
);
1192 if ((PVOID
)current
< MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1194 current
->previous
= blk
;
1198 DPRINT("%d\n", blk
->Size
);
1199 blk
->Magic
= BLOCK_HDR_FREE_MAGIC
;
1200 EiFreeNonPagedPool
+= blk
->Size
;
1201 avl_insert(&FreeBlockListRoot
, &blk
->Free
.Node
, compare_node
);
1203 DPRINT("add_to_free_list done\n");
1205 DumpFreeBlockTree();
1209 static void add_to_used_list(BLOCK_HDR
* blk
)
1211 * FUNCTION: add the block to the used list (internal)
1214 InsertHeadList(&UsedBlockListHead
, &blk
->Used
.ListEntry
);
1215 EiUsedNonPagedPool
+= blk
->Size
;
1219 inline static void* block_to_address(BLOCK_HDR
* blk
)
1221 * FUNCTION: Translate a block header address to the corresponding block
1222 * address (internal)
1225 return ( (void *) ((char*)blk
+ BLOCK_HDR_SIZE
));
1228 inline static BLOCK_HDR
* address_to_block(void* addr
)
1230 return (BLOCK_HDR
*)
1231 ( ((char*)addr
) - BLOCK_HDR_SIZE
);
1235 grow_block(BLOCK_HDR
* blk
, PVOID end
)
1238 PHYSICAL_ADDRESS Page
;
1239 BOOLEAN result
= TRUE
;
1242 PVOID start
= (PVOID
)PAGE_ROUND_UP((ULONG
)((char*)blk
+ BLOCK_HDR_SIZE
));
1243 end
= (PVOID
)PAGE_ROUND_UP(end
);
1244 index
= (ULONG
)(start
- MiNonPagedPoolStart
) / PAGE_SIZE
;
1247 if (!(MiNonPagedPoolAllocMap
[index
/ 32] & (1 << (index
% 32))))
1249 Status
= MmRequestPageMemoryConsumer(MC_NPPOOL
, FALSE
, &Page
);
1250 if (!NT_SUCCESS(Status
))
1255 Status
= MmCreateVirtualMapping(NULL
,
1257 PAGE_READWRITE
|PAGE_SYSTEM
,
1260 if (!NT_SUCCESS(Status
))
1262 DbgPrint("Unable to create virtual mapping\n");
1263 MmReleasePageMemoryConsumer(MC_NPPOOL
, Page
);
1267 MiNonPagedPoolAllocMap
[index
/ 32] |= (1 << (index
% 32));
1268 memset(start
, 0xcc, PAGE_SIZE
);
1269 MiNonPagedPoolNrOfPages
++;
1277 static BLOCK_HDR
* get_block(unsigned int size
, unsigned long alignment
)
1279 BLOCK_HDR
*blk
, *current
, *previous
= NULL
, *next
= NULL
, *best
= NULL
;
1280 ULONG previous_size
, current_size
, next_size
, new_size
;
1282 PVOID addr
, aligned_addr
;
1285 DPRINT("get_block %d\n", size
);
1289 p
= avl_find_equal_or_greater(FreeBlockListRoot
, size
, compare_value
);
1292 best
= CONTAINING_RECORD(p
, BLOCK_HDR
, Free
.Node
);
1293 addr
= block_to_address(best
);
1298 p
= avl_find_equal_or_greater(FreeBlockListRoot
, size
, compare_value
);
1302 current
= CONTAINING_RECORD(p
, BLOCK_HDR
, Free
.Node
);
1303 addr
= block_to_address(current
);
1304 /* calculate first aligned address available within this block */
1305 aligned_addr
= MM_ROUND_UP(addr
, alignment
);
1306 /* check to see if this address is already aligned */
1307 if (addr
== aligned_addr
)
1309 if (current
->Size
>= size
&&
1310 (best
== NULL
|| current
->Size
< best
->Size
))
1317 /* make sure there's enough room to make a free block by the space skipped
1318 * from alignment. If not, calculate forward to the next alignment
1319 * and see if we allocate there...
1321 new_size
= (ULONG
)aligned_addr
- (ULONG
)addr
+ size
;
1322 if ((ULONG
)aligned_addr
- (ULONG
)addr
< BLOCK_HDR_SIZE
)
1324 /* not enough room for a free block header, add some more bytes */
1325 aligned_addr
= MM_ROUND_UP(block_to_address((BLOCK_HDR
*)((char*)current
+ BLOCK_HDR_SIZE
)), alignment
);
1326 new_size
= (ULONG
)aligned_addr
- (ULONG
)addr
+ size
;
1328 if (current
->Size
>= new_size
&&
1329 (best
== NULL
|| current
->Size
< best
->Size
))
1334 if (best
&& current
->Size
>= size
+ alignment
+ 2 * BLOCK_HDR_SIZE
)
1338 p
= avl_get_next(FreeBlockListRoot
, p
);
1344 * We didn't find anything suitable at all.
1352 current_size
= current
->Size
;
1356 addr
= block_to_address(current
);
1357 aligned_addr
= MM_ROUND_UP(addr
, alignment
);
1358 if (addr
!= aligned_addr
)
1360 blk
= address_to_block(aligned_addr
);
1361 if ((char*)blk
< (char*)current
+ BLOCK_HDR_SIZE
)
1363 aligned_addr
= MM_ROUND_UP(block_to_address((BLOCK_HDR
*)((char*)current
+ BLOCK_HDR_SIZE
)), alignment
);
1364 blk
= address_to_block(aligned_addr
);
1367 * if size-aligned, break off the preceding bytes into their own block...
1370 previous_size
= (ULONG
)blk
- (ULONG
)previous
- BLOCK_HDR_SIZE
;
1372 current_size
-= ((ULONG
)current
- (ULONG
)previous
);
1376 end
= (PVOID
)current
+ BLOCK_HDR_SIZE
+ size
;
1378 if (current_size
>= size
+ BLOCK_HDR_SIZE
+ MM_POOL_ALIGNMENT
)
1380 /* create a new free block after our block, if the memory size is >= 4 byte for this block */
1381 next
= (BLOCK_HDR
*)((ULONG
)current
+ size
+ BLOCK_HDR_SIZE
);
1382 next_size
= current_size
- size
- BLOCK_HDR_SIZE
;
1383 current_size
= size
;
1384 end
= (PVOID
)next
+ BLOCK_HDR_SIZE
;
1389 remove_from_free_list(previous
);
1390 if (!grow_block(previous
, end
))
1392 add_to_free_list(previous
);
1395 memset(current
, 0, BLOCK_HDR_SIZE
);
1396 current
->Size
= current_size
;
1397 current
->Magic
= BLOCK_HDR_USED_MAGIC
;
1398 current
->previous
= previous
;
1399 previous
->Size
= previous_size
;
1402 blk
= (BLOCK_HDR
*)((char*)current
+ BLOCK_HDR_SIZE
+ current
->Size
);
1403 if ((PVOID
)blk
< MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1405 blk
->previous
= current
;
1409 add_to_free_list(previous
);
1413 remove_from_free_list(current
);
1415 if (!grow_block(current
, end
))
1417 add_to_free_list(current
);
1421 current
->Magic
= BLOCK_HDR_USED_MAGIC
;
1424 current
->Size
= current_size
;
1430 memset(next
, 0, BLOCK_HDR_SIZE
);
1432 next
->Size
= next_size
;
1433 next
->Magic
= BLOCK_HDR_FREE_MAGIC
;
1434 next
->previous
= current
;
1435 blk
= (BLOCK_HDR
*)((char*)next
+ BLOCK_HDR_SIZE
+ next
->Size
);
1436 if ((PVOID
)blk
< MiNonPagedPoolStart
+ MiNonPagedPoolLength
)
1438 blk
->previous
= next
;
1440 add_to_free_list(next
);
1443 add_to_used_list(current
);
1448 #endif /* not WHOLE_PAGE_ALLOCATIONS */
1450 VOID STDCALL
ExFreeNonPagedPool (PVOID block
)
1452 * FUNCTION: Releases previously allocated memory
1454 * block = block to free
1457 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
1465 DPRINT("freeing block %x\n",blk
);
1467 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block
,blk
->size
,
1468 ((PULONG
)&block
)[-1]);
1470 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
1472 ExFreeWholePageBlock(block
);
1473 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1475 #else /* not WHOLE_PAGE_ALLOCATIONS */
1477 BLOCK_HDR
* blk
=address_to_block(block
);
1485 DPRINT("freeing block %x\n",blk
);
1487 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block
,blk
->size
,
1488 ((PULONG
)&block
)[-1]);
1490 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
1494 if (blk
->Magic
!= BLOCK_HDR_USED_MAGIC
)
1496 if (blk
->Magic
== BLOCK_HDR_FREE_MAGIC
)
1498 DbgPrint("ExFreePool of already freed address %x\n", block
);
1502 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1509 memset(block
, 0xcc, blk
->Size
);
1510 #ifdef TAG_STATISTICS_TRACKING
1511 MiRemoveFromTagHashTable(blk
);
1513 remove_from_used_list(blk
);
1514 blk
->Magic
= BLOCK_HDR_FREE_MAGIC
;
1515 add_to_free_list(blk
);
1517 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1519 #endif /* WHOLE_PAGE_ALLOCATIONS */
1523 ExAllocateNonPagedPoolWithTag(ULONG Type
, ULONG Size
, ULONG Tag
, PVOID Caller
)
1525 #ifdef WHOLE_PAGE_ALLOCATIONS
1529 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1532 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
1535 * accomodate this useful idiom
1539 POOL_TRACE("= NULL\n");
1540 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1544 block
= ExAllocateWholePageBlock(Size
);
1545 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1548 DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
1553 #else /* not WHOLE_PAGE_ALLOCATIONS */
1555 BLOCK_HDR
* best
= NULL
;
1559 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1562 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
1567 /* after some allocations print the npaged pool stats */
1568 #ifdef TAG_STATISTICS_TRACKING
1570 static ULONG counter
= 0;
1571 if (counter
++ % 100000 == 0)
1573 MiDebugDumpNonPagedPoolStats(FALSE
);
1579 * accomodate this useful idiom
1583 POOL_TRACE("= NULL\n");
1584 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1587 /* Make the size dword alligned, this makes the block dword alligned */
1588 Size
= ROUND_UP(Size
, MM_POOL_ALIGNMENT
);
1590 if (Size
>= PAGE_SIZE
)
1592 alignment
= PAGE_SIZE
;
1594 else if (Type
== NonPagedPoolCacheAligned
||
1595 Type
== NonPagedPoolCacheAlignedMustS
)
1597 alignment
= MM_CACHE_LINE_SIZE
;
1604 best
= get_block(Size
, alignment
);
1607 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1608 DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
1612 best
->Used
.Tag
= Tag
;
1613 best
->Used
.Caller
= Caller
;
1614 best
->Used
.Dumped
= FALSE
;
1615 best
->Used
.TagListEntry
.Flink
= best
->Used
.TagListEntry
.Blink
= NULL
;
1616 #ifdef TAG_STATISTICS_TRACKING
1617 MiAddToTagHashTable(best
);
1619 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
1620 block
= block_to_address(best
);
1621 memset(block
,0,Size
);
1623 #endif /* WHOLE_PAGE_ALLOCATIONS */
1626 #ifdef WHOLE_PAGE_ALLOCATIONS
1629 ExAllocateWholePageBlock(ULONG Size
)
1632 PHYSICAL_ADDRESS Page
;
1637 NrPages
= ROUND_UP(Size
, PAGE_SIZE
) / PAGE_SIZE
;
1639 Base
= RtlFindClearBitsAndSet(&NonPagedPoolAllocMap
, NrPages
+ 1, NonPagedPoolAllocMapHint
);
1640 if (Base
== 0xffffffff)
1642 DbgPrint("Out of non paged pool space.\n");
1645 if (NonPagedPoolAllocMapHint
== Base
)
1647 NonPagedPoolAllocMapHint
+= (NrPages
+ 1);
1649 Address
= MiNonPagedPoolStart
+ Base
* PAGE_SIZE
;
1651 for (i
= 0; i
< NrPages
; i
++)
1653 Page
= MmAllocPage(MC_NPPOOL
, 0);
1654 if (Page
.QuadPart
== 0LL)
1658 MmCreateVirtualMapping(NULL
,
1659 Address
+ (i
* PAGE_SIZE
),
1660 PAGE_READWRITE
| PAGE_SYSTEM
,
1665 MiCurrentNonPagedPoolLength
= max(MiCurrentNonPagedPoolLength
, (Base
+ NrPages
) * PAGE_SIZE
);
1666 return((PVOID
)((PUCHAR
)Address
+ (NrPages
* PAGE_SIZE
) - Size
));
1670 ExFreeWholePageBlock(PVOID Addr
)
1674 if (Addr
< MiNonPagedPoolStart
||
1675 Addr
>= (MiNonPagedPoolStart
+ MiCurrentNonPagedPoolLength
))
1677 DbgPrint("Block %x found outside pool area\n", Addr
);
1680 Base
= (Addr
- MiNonPagedPoolStart
) / PAGE_SIZE
;
1681 NonPagedPoolAllocMapHint
= min(NonPagedPoolAllocMapHint
, Base
);
1682 while (MmIsPagePresent(NULL
, Addr
))
1684 MmDeleteVirtualMapping(NULL
,
1689 RtlClearBits(&NonPagedPoolAllocMap
, Base
, 1);
1695 #endif /* WHOLE_PAGE_ALLOCATIONS */
1698 MiInitializeNonPagedPool(VOID
)
1701 PHYSICAL_ADDRESS Page
;
1704 #ifdef WHOLE_PAGE_ALLOCATIONS
1708 #ifdef TAG_STATISTICS_TRACKING
1709 for (i
= 0; i
< TAG_HASH_TABLE_SIZE
; i
++)
1711 InitializeListHead(&tag_hash_table
[i
]);
1714 KeInitializeSpinLock(&MmNpoolLock
);
1715 InitializeListHead(&UsedBlockListHead
);
1716 InitializeListHead(&AddressListHead
);
1717 FreeBlockListRoot
= NULL
;
1718 #ifdef WHOLE_PAGE_ALLOCATIONS
1719 NonPagedPoolAllocMapHint
= PAGE_ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
/ 8) / PAGE_SIZE
;
1720 MiCurrentNonPagedPoolLength
= NonPagedPoolAllocMapHint
* PAGE_SIZE
;
1721 Address
= MiNonPagedPoolStart
;
1722 for (i
= 0; i
< NonPagedPoolAllocMapHint
; i
++)
1724 Status
= MmRequestPageMemoryConsumer(MC_NPPOOL
, FALSE
, &Page
);
1725 if (!NT_SUCCESS(Status
))
1727 DbgPrint("Unable to allocate a page\n");
1730 Status
= MmCreateVirtualMapping(NULL
,
1732 PAGE_READWRITE
|PAGE_SYSTEM
,
1735 if (!NT_SUCCESS(Status
))
1737 DbgPrint("Unable to create virtual mapping\n");
1740 Address
+= PAGE_SIZE
;
1742 RtlInitializeBitMap(&NonPagedPoolAllocMap
, MiNonPagedPoolStart
, MM_NONPAGED_POOL_SIZE
/ PAGE_SIZE
);
1743 RtlClearAllBits(&NonPagedPoolAllocMap
);
1744 RtlSetBits(&NonPagedPoolAllocMap
, 0, NonPagedPoolAllocMapHint
);
1746 MiNonPagedPoolAllocMap
= block_to_address((BLOCK_HDR
*)MiNonPagedPoolStart
);
1747 MiNonPagedPoolNrOfPages
= PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
, 32) / 8 + 2 * BLOCK_HDR_SIZE
);
1748 MiNonPagedPoolNrOfPages
/= PAGE_SIZE
;
1749 Address
= MiNonPagedPoolStart
;
1750 for (i
= 0; i
< MiNonPagedPoolNrOfPages
; i
++)
1752 Status
= MmRequestPageMemoryConsumer(MC_NPPOOL
, FALSE
, &Page
);
1753 if (!NT_SUCCESS(Status
))
1755 DbgPrint("Unable to allocate a page\n");
1758 Status
= MmCreateVirtualMapping(NULL
,
1760 PAGE_READWRITE
|PAGE_SYSTEM
,
1763 if (!NT_SUCCESS(Status
))
1765 DbgPrint("Unable to create virtual mapping\n");
1768 MiNonPagedPoolAllocMap
[i
/ 32] |= (1 << (i
% 32));
1769 Address
+= PAGE_SIZE
;
1771 /* the first block contains the non paged pool bitmap */
1772 blk
= (BLOCK_HDR
*)MiNonPagedPoolStart
;
1773 memset(blk
, 0, BLOCK_HDR_SIZE
);
1774 blk
->Magic
= BLOCK_HDR_USED_MAGIC
;
1775 blk
->Size
= ROUND_UP(MiNonPagedPoolLength
/ PAGE_SIZE
, 32) / 8;
1776 blk
->previous
= NULL
;
1777 blk
->Used
.Tag
= 0xffffffff;
1778 blk
->Used
.Caller
= 0;
1779 blk
->Used
.Dumped
= FALSE
;
1780 add_to_used_list(blk
);
1781 #ifdef TAG_STATISTICS_TRACKING
1782 MiAddToTagHashTable(blk
);
1784 /* the second block is the first free block */
1785 blk
= (BLOCK_HDR
*)((char*)blk
+ BLOCK_HDR_SIZE
+ blk
->Size
);
1786 memset(blk
, 0, BLOCK_HDR_SIZE
);
1787 memset((PVOID
)blk
+ BLOCK_HDR_SIZE
, 0x0cc, MiNonPagedPoolNrOfPages
* PAGE_SIZE
- ((ULONG
)blk
+ BLOCK_HDR_SIZE
- (ULONG
)MiNonPagedPoolStart
));
1788 blk
->Magic
= BLOCK_HDR_FREE_MAGIC
;
1789 blk
->Size
= MiNonPagedPoolLength
- ((ULONG
)blk
+ BLOCK_HDR_SIZE
- (ULONG
)MiNonPagedPoolStart
);
1790 blk
->previous
= (BLOCK_HDR
*)MiNonPagedPoolStart
;
1791 add_to_free_list(blk
);