1 /* $Id: npool.c,v 1.64 2002/11/10 18:17:42 chorns Exp $
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
17 /* INCLUDES ****************************************************************/
19 #include <ddk/ntddk.h>
20 #include <internal/mm.h>
21 #include <internal/ntoskrnl.h>
22 #include <internal/pool.h>
25 #include <internal/debug.h>
/* Enable strict checking of the nonpaged pool on every allocation */
//#define ENABLE_VALIDATE_POOL

/* Enable tracking of statistics about the tagged blocks in the pool */
#define TAG_STATISTICS_TRACKING

/*
 * Put each block in its own range of pages and position the block at the
 * end of the range so any accesses beyond the end of block are to invalid
 * memory (debug aid; trades much higher memory use for overrun detection).
 */
//#define WHOLE_PAGE_ALLOCATIONS

#ifdef ENABLE_VALIDATE_POOL
#define VALIDATE_POOL validate_kernel_pool()
/* NOTE(review): the matching #else (empty VALIDATE_POOL) and #endif are
 * missing from this extraction. */
/* NOTE(review): the two POOL_TRACE definitions below were originally
 * selected by a debug/release preprocessor conditional whose #if/#else/
 * #endif lines are missing from this extraction. */
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
#define POOL_TRACE(args...)
/* TYPES *******************************************************************/

/* Magic values stamped into each block header so corruption, double frees
 * and frees of never-allocated addresses can be detected. */
#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)

/*
 * fields present at the start of a block (this is for internal use only)
 */
typedef struct _BLOCK_HDR
/* NOTE(review): the other members of this structure (Magic, Size,
 * ListEntry, Tag, Caller, Dumped) are missing from this extraction; only
 * the tag-hash chain link survives, as does the closing "} BLOCK_HDR;". */
     struct _BLOCK_HDR* tag_next;   /* next block in the same tag-hash chain */
;

/* Prototypes for the WHOLE_PAGE_ALLOCATIONS debug allocator.
 * NOTE(review): return types/linkage were lost in this extraction. */
ExAllocateWholePageBlock(ULONG Size);
ExFreeWholePageBlock(PVOID Addr);
/* GLOBALS *****************************************************************/

/*
 * Base virtual address of the nonpaged pool region (set once at init).
 */
static unsigned int kernel_pool_base = 0;

/*
 * Head of the list of free blocks
 */
static LIST_ENTRY FreeBlockListHead;

/*
 * Head of the list of in use blocks
 */
static LIST_ENTRY UsedBlockListHead;

#ifndef WHOLE_PAGE_ALLOCATIONS
/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;
/* NOTE(review): the #endif matching the #ifndef above is missing from this
 * extraction. */

/*
 * Lock that protects the non-paged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/*
 * Allocate a range of memory in the nonpaged pool.
 * NOTE(review): return type lost in this extraction (presumably PVOID).
 */
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

/* Release a previously allocated pool region.
 * NOTE(review): return type lost in this extraction. */
MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
/* Per-tag chains of used blocks, keyed by Tag % TAG_HASH_TABLE_SIZE. */
static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */
134 /* FUNCTIONS ***************************************************************/
#ifdef TAG_STATISTICS_TRACKING
/* NOTE(review): the return type, the local declarations (hash, current,
 * previous), several braces/returns and the "previous = current;" advance
 * are missing from this extraction. */
MiRemoveFromTagHashTable(BLOCK_HDR* block)
/*
 * Remove a block from the tag hash table
 */
     /* Chain index is the pool tag reduced modulo the table size. */
     hash = block->Tag % TAG_HASH_TABLE_SIZE;

     /* Walk the singly linked chain looking for this exact block. */
     current = tag_hash_table[hash];
     while (current != NULL)
	  if (current == block)
	       if (previous == NULL)
		    /* Block is the chain head: unlink it from the table. */
		    tag_hash_table[hash] = block->tag_next;
		    /* Block is mid-chain: bypass it. */
		    previous->tag_next = block->tag_next;
	  current = current->tag_next;
     /* Fell off the end of the chain: the block was never hashed in (or
      * the table is corrupt) — report but do not bugcheck. */
     DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
	     block->Tag, block->Caller);
/* NOTE(review): the return type, local declarations, braces and the early
 * return after splicing into an existing tag chain are missing from this
 * extraction. */
MiAddToTagHashTable(BLOCK_HDR* block)
/*
 * Add a block to the tag hash table
 */
     hash = block->Tag % TAG_HASH_TABLE_SIZE;

     current = tag_hash_table[hash];
     while (current != NULL)
	  if (current->Tag == block->Tag)
	       /* A block with this tag already exists: splice the new block
		* in right behind it so same-tag blocks stay adjacent. */
	       block->tag_next = current->tag_next;
	       current->tag_next = block;
	  /* Sanity check: a chain link pointing outside the pool region
	   * means the table is corrupt.  NOTE: (PVOID + integer) arithmetic
	   * is a GCC extension (treated as char*). */
	  if (current->tag_next
	      && ((PVOID)current->tag_next >= (PVOID)kernel_pool_base + NONPAGED_POOL_SIZE
		  || (PVOID)current->tag_next < (PVOID)kernel_pool_base))
	       DbgPrint("previous %x\n", previous);
	  current = current->tag_next;
     /* Tag not seen before: append the block at the end of the chain. */
     block->tag_next = NULL;
     if (previous == NULL)
	  /* Chain was empty: block becomes the head. */
	  tag_hash_table[hash] = block;
	  previous->tag_next = block;
#endif /* TAG_STATISTICS_TRACKING */
225 ExInitNonPagedPool(ULONG BaseAddress
)
227 kernel_pool_base
= BaseAddress
;
228 KeInitializeSpinLock(&MmNpoolLock
);
229 MmInitKernelMap((PVOID
)BaseAddress
);
230 memset(tag_hash_table
, 0, sizeof(tag_hash_table
));
231 InitializeListHead(&FreeBlockListHead
);
232 InitializeListHead(&UsedBlockListHead
);
#ifdef TAG_STATISTICS_TRACKING
static VOID
MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
/*
 * FUNCTION: Print one statistics line for a pool tag: the tag value (plus
 * its four-character ASCII form when every byte is printable), the block
 * count, the total size and the average block size.
 * NOTE(review): callers only invoke this with CurrentNrBlocks != 0, so the
 * average computation cannot divide by zero.  The local declarations and
 * function head were reconstructed from a damaged extraction.
 */
{
   CHAR c1, c2, c3, c4;

   /* Split the 32-bit tag into its four bytes; tags are stored with the
    * first character in the low byte, so printing c4,c3,c2,c1 yields the
    * tag in reading order. */
   c1 = (CurrentTag >> 24) & 0xFF;
   c2 = (CurrentTag >> 16) & 0xFF;
   c3 = (CurrentTag >> 8) & 0xFF;
   c4 = CurrentTag & 0xFF;

   if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
     {
	DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
		 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
		 CurrentSize, CurrentSize / CurrentNrBlocks);
     }
   else
     {
	DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
		 CurrentTag, CurrentNrBlocks, CurrentSize,
		 CurrentSize / CurrentNrBlocks);
     }
}
#endif /* TAG_STATISTICS_TRACKING */
/* NOTE(review): return type, several local declarations (i, current,
 * CurrentTag, CurrentSize, TotalBlocks, TotalSize), counter updates and
 * most braces are missing from this extraction. */
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
/*
 * Walk the tag hash table and print per-tag and overall statistics for the
 * used nonpaged pool.  When NewOnly is TRUE only blocks not previously
 * dumped are counted; each visited block is marked Dumped afterwards.
 */
#ifdef TAG_STATISTICS_TRACKING
     ULONG CurrentNrBlocks;

     DbgPrint("******* Dumping non paging pool stats ******\n");
     for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
	  current = tag_hash_table[i];
	  while (current != NULL)
	       /* Chains keep same-tag blocks adjacent, so a tag change
		* means the previous tag's run is complete: flush it. */
	       if (current->Tag != CurrentTag)
		    if (CurrentTag != 0 && CurrentNrBlocks != 0)
			 MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
		    CurrentTag = current->Tag;
	       if (!NewOnly || !current->Dumped)
		    CurrentSize = CurrentSize + current->Size;
		    TotalSize = TotalSize + current->Size;
		    current->Dumped = TRUE;
	       current = current->tag_next;
     /* Flush the statistics for the final tag run. */
     if (CurrentTag != 0 && CurrentNrBlocks != 0)
	  MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
     if (TotalBlocks != 0)
	  DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
		   TotalBlocks, TotalSize, TotalSize / TotalBlocks);
	  /* No blocks: omit the average to avoid dividing by zero. */
	  DbgPrint("TotalBlocks %d TotalSize %d\n",
		   TotalBlocks, TotalSize);
     DbgPrint("***************** Dump Complete ***************\n");
#endif /* TAG_STATISTICS_TRACKING */
/* NOTE(review): return type, declarations of oldIrql/current/c1..c4 and
 * most braces are missing from this extraction; the printable-tag DbgPrint
 * is also missing its final current->Caller argument line. */
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
/*
 * Print every block on the used list (size, tag, allocator address).
 * When NewOnly is TRUE, blocks already dumped before are skipped.
 */
     PLIST_ENTRY current_entry;

     /* Serialise against allocators/freers while walking the list. */
     KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
     DbgPrint("******* Dumping non paging pool contents ******\n");
     current_entry = UsedBlockListHead.Flink;
     while (current_entry != &UsedBlockListHead)
	  current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
	  if (!NewOnly || !current->Dumped)
	       /* Decompose the tag into its four characters (low byte is
		* the first character, hence printing c4..c1). */
	       c1 = (current->Tag >> 24) & 0xFF;
	       c2 = (current->Tag >> 16) & 0xFF;
	       c3 = (current->Tag >> 8) & 0xFF;
	       c4 = current->Tag & 0xFF;
	       if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
		    DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
			     current->Size, current->Tag, c4, c3, c2, c1,
		    DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
			     current->Size, current->Tag, current->Caller);
	       current->Dumped = TRUE;
	  current_entry = current_entry->Flink;
     DbgPrint("***************** Dump Complete ***************\n");
     KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#ifndef WHOLE_PAGE_ALLOCATIONS

#ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 * NOTE(review): declarations of current, braces, the blocks_seen increment
 * and the DbgPrint address argument after the magic check are missing from
 * this extraction.  The mojibake "&curren;t" artifacts were restored to
 * "&current".
 */
     PLIST_ENTRY current_entry;
     unsigned int blocks_seen=0;

     current_entry = FreeBlockListHead.Flink;
     while (current_entry != &FreeBlockListHead)
	  unsigned int base_addr;

	  current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
	  base_addr = (int)current;

	  /* A free block must carry the free magic; anything else means
	   * the pool has been corrupted. */
	  if (current->Magic != BLOCK_HDR_FREE_MAGIC)
	       DbgPrint("Bad block magic (probable pool corruption) at %x\n",
	       KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

	  /* The whole block must lie inside the pool region. */
	  if (base_addr < (kernel_pool_base) ||
	      (base_addr+current->Size) > (kernel_pool_base)+NONPAGED_POOL_SIZE)
	       DbgPrint("Block %x found outside pool area\n",current);
	       DbgPrint("Size %d\n",current->Size);
	       DbgPrint("Limits are %x %x\n",kernel_pool_base,
			kernel_pool_base+NONPAGED_POOL_SIZE);
	       KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

	  /* More nodes than the free-block counter says exist implies a
	   * cycle or cross-linked list. */
	  if (blocks_seen > EiNrFreeBlocks)
	       DbgPrint("Too many blocks on free list\n");
	       KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

	  /* Forward/backward links must be mutually consistent. */
	  if (current->ListEntry.Flink != &FreeBlockListHead &&
	      current->ListEntry.Flink->Blink != &current->ListEntry)
	       DbgPrint("%s:%d:Break in list (current %x next %x "
			"current->next->previous %x)\n",
			__FILE__,__LINE__,current, current->ListEntry.Flink,
			current->ListEntry.Flink->Blink);
	       KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

	  current_entry = current_entry->Flink;
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * NOTE(review): declarations of current, braces, the blocks_seen increment
 * and the bugcheck/for(;;) lines after the diagnostics are missing from
 * this extraction.  Unlike validate_free_list, the visible range/count
 * failures here only print diagnostics.  "&curren;t" mojibake restored.
 */
     PLIST_ENTRY current_entry;
     unsigned int blocks_seen=0;

     current_entry = UsedBlockListHead.Flink;
     while (current_entry != &UsedBlockListHead)
	  unsigned int base_addr;

	  current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
	  base_addr = (int)current;

	  /* A used block must carry the used magic. */
	  if (current->Magic != BLOCK_HDR_USED_MAGIC)
	       DbgPrint("Bad block magic (probable pool corruption) at %x\n",
	       KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

	  /* The whole block must lie inside the pool region. */
	  if (base_addr < (kernel_pool_base) ||
	      (base_addr+current->Size) >
	      (kernel_pool_base)+NONPAGED_POOL_SIZE)
	       DbgPrint("Block %x found outside pool area\n",current);

	  /* More nodes than the counter admits implies a broken list. */
	  if (blocks_seen > EiNrUsedBlocks)
	       DbgPrint("Too many blocks on used list\n");

	  /* Forward/backward links must be mutually consistent. */
	  if (current->ListEntry.Flink != &UsedBlockListHead &&
	      current->ListEntry.Flink->Blink != &current->ListEntry)
	       DbgPrint("Break in list (current %x next %x)\n",
			current, current->ListEntry.Flink);

	  current_entry = current_entry->Flink;
static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if duplicates are found
 * NOTE(review): the declaration of current, braces, the bugcheck calls
 * after the diagnostics and the right-hand side of the second overlap
 * comparison ("> base") are missing from this extraction.
 */
     /* [base, last) is the full extent of blk including its header. */
     unsigned int base = (int)blk;
     unsigned int last = ((int)blk) + +sizeof(BLOCK_HDR) + blk->Size;
     PLIST_ENTRY current_entry;

     /* Scan the free list for any block overlapping [base, last). */
     current_entry = FreeBlockListHead.Flink;
     while (current_entry != &FreeBlockListHead)
	  current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

	  if (current->Magic != BLOCK_HDR_FREE_MAGIC)
	       DbgPrint("Bad block magic (probable pool corruption) at %x\n",
	       KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

	  /* current starts inside blk's extent. */
	  if ( (int)current > base && (int)current < last )
	       DbgPrint("intersecting blocks on list\n");
	  /* current starts before blk but runs into it. */
	  if ( (int)current < base &&
	       ((int)current + current->Size + sizeof(BLOCK_HDR))
	       DbgPrint("intersecting blocks on list\n");

	  current_entry = current_entry->Flink;

     /* Repeat the overlap scan against the used list. */
     current_entry = UsedBlockListHead.Flink;
     while (current_entry != &UsedBlockListHead)
	  current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

	  if ( (int)current > base && (int)current < last )
	       DbgPrint("intersecting blocks on list\n");
	  if ( (int)current < base &&
	       ((int)current + current->Size + sizeof(BLOCK_HDR))
	       DbgPrint("intersecting blocks on list\n");

	  current_entry = current_entry->Flink;
static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 * NOTE(review): the declaration of current, braces and the #endif closing
 * ENABLE_VALIDATE_POOL are missing from this extraction.
 */
     PLIST_ENTRY current_entry;

     /* First check each list's own structural invariants ... */
     validate_free_list();
     validate_used_list();

     /* ... then check that no block on either list overlaps another. */
     current_entry = FreeBlockListHead.Flink;
     while (current_entry != &FreeBlockListHead)
	  current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
	  check_duplicates(current);
	  current_entry = current_entry->Flink;
     current_entry = UsedBlockListHead.Flink;
     while (current_entry != &UsedBlockListHead)
	  current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
	  check_duplicates(current);
	  current_entry = current_entry->Flink;
/* NOTE(review): this function is heavily truncated in this extraction: the
 * return type, the computation of 'start', the early-return body and the
 * code that actually releases whole pages contained in the block are all
 * missing. */
free_pages(BLOCK_HDR* blk)
/*
 * Release any whole pages fully contained within the free block 'blk'
 * back to the system.
 */
     /* One past the last byte of the block, header included. */
     end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

     /*
      * If the block doesn't contain a whole page then there is nothing to do
      */
     if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
/* NOTE(review): the return type, the declarations of next/previous, braces
 * and the right-hand sides of both adjacency comparisons ("== (unsigned
 * int)next" / "== (unsigned int)blk") are missing from this extraction. */
merge_free_block(BLOCK_HDR* blk)
/*
 * Coalesce 'blk' (already on the free list) with physically adjacent free
 * neighbours.  The free list is kept sorted by address, so only the list
 * neighbours can be memory-adjacent.
 */
     PLIST_ENTRY next_entry;
     PLIST_ENTRY previous_entry;

     /* Try to absorb the following block if it starts exactly where blk
      * ends. */
     next_entry = blk->ListEntry.Flink;
     if (next_entry != &FreeBlockListHead)
	  next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
	  if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
	       RemoveEntryList(&next->ListEntry);
	       blk->Size = blk->Size + next->Size;
	       /* Poison the swallowed header to catch stale references. */
	       memset(next, 0xcc, sizeof(BLOCK_HDR));
	       /* The absorbed header becomes usable free space. */
	       EiFreeNonPagedPool += sizeof(BLOCK_HDR);

     /* Symmetrically, let the preceding block absorb blk. */
     previous_entry = blk->ListEntry.Blink;
     if (previous_entry != &FreeBlockListHead)
	  previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
	  if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
	       RemoveEntryList(&blk->ListEntry);
	       previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
	       memset(blk, 0xcc, sizeof(BLOCK_HDR));
	       EiFreeNonPagedPool += sizeof(BLOCK_HDR);
/* NOTE(review): the return type, the declaration of current, braces and
 * the early "return;" after the in-place insertion are missing from this
 * extraction. */
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 * The list is kept sorted by ascending address so merge_free_block can
 * coalesce adjacent neighbours.
 */
     PLIST_ENTRY current_entry;

     current_entry = FreeBlockListHead.Flink;
     while (current_entry != &FreeBlockListHead)
	  current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

	  /* First node with a higher address: insert blk just before it. */
	  if ((unsigned int)current > (unsigned int)blk)
	       blk->ListEntry.Flink = current_entry;
	       blk->ListEntry.Blink = current_entry->Blink;
	       current_entry->Blink->Flink = &blk->ListEntry;
	       current_entry->Blink = &blk->ListEntry;
	       EiFreeNonPagedPool += blk->Size;

	  current_entry = current_entry->Flink;
     /* blk has the highest address seen: append at the tail. */
     InsertTailList(&FreeBlockListHead, &blk->ListEntry);
     EiFreeNonPagedPool += blk->Size;
static void add_to_used_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the used list (internal)
 * NOTE(review): braces and the EiNrUsedBlocks counter update are missing
 * from this extraction.
 */
     /* Used blocks need no ordering — push at the head in O(1). */
     InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
     EiUsedNonPagedPool += blk->Size;
static void remove_from_free_list(BLOCK_HDR* current)
/*
 * FUNCTION: unlink a block from the free list and debit the free-space
 * accounting (internal).
 * NOTE(review): braces and the EiNrFreeBlocks counter update are missing
 * from this extraction; the "&curren;t" mojibake was restored to
 * "&current".
 */
     RemoveEntryList(&current->ListEntry);
     EiFreeNonPagedPool -= current->Size;
static void remove_from_used_list(BLOCK_HDR* current)
/*
 * FUNCTION: unlink a block from the used list and debit the used-space
 * accounting (internal).
 * NOTE(review): braces and the EiNrUsedBlocks counter update are missing
 * from this extraction; the "&curren;t" mojibake was restored to
 * "&current".
 */
     RemoveEntryList(&current->ListEntry);
     EiUsedNonPagedPool -= current->Size;
681 inline static void* block_to_address(BLOCK_HDR
* blk
)
683 * FUNCTION: Translate a block header address to the corresponding block
687 return ( (void *) ((int)blk
+ sizeof(BLOCK_HDR
)) );
690 inline static BLOCK_HDR
* address_to_block(void* addr
)
693 ( ((int)addr
) - sizeof(BLOCK_HDR
) );
static BLOCK_HDR* lookup_block(unsigned int size)
/*
 * Best-fit search of the free list for a block able to hold 'size' bytes.
 * Requests of a page or more must additionally be satisfiable at a
 * page-aligned user address, so the required size is adjusted per block.
 * NOTE(review): the declarations of current/new_size, the "best = current"
 * assignments inside both if-bodies, braces, the else keyword and the
 * final "return best;" are missing from this extraction.
 */
     PLIST_ENTRY current_entry;
     BLOCK_HDR* best = NULL;
     PVOID block, block_boundary;

     current_entry = FreeBlockListHead.Flink;
     if (size < PAGE_SIZE)
	  /* Small request: plain best-fit on Size. */
	  while (current_entry != &FreeBlockListHead)
	       /* NOTE(review): this DPRINT reads 'current' before it is
		* assigned for this iteration (stale from the previous one,
		* indeterminate on the first) — pre-existing quirk. */
	       DPRINT("current %x size %x tag_next %x\n",
		      current, current->Size, current->tag_next);
	       current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
	       if (current->Size >= size &&
		   (best == NULL || current->Size < best->Size))
	       current_entry = current_entry->Flink;
	  /* Large request: the returned user address must be page aligned,
	   * so per candidate compute how much extra is needed to reach the
	   * next page boundary. */
	  while (current_entry != &FreeBlockListHead)
	       DPRINT("current %x size %x tag_next %x\n",
		      current, current->Size, current->tag_next);
	       current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

	       block = block_to_address(current);
	       block_boundary = (PVOID)PAGE_ROUND_UP((ULONG)block);
	       new_size = (ULONG)block_boundary - (ULONG)block + size;
	       /* If the gap before the boundary is too small to carve off a
		* header, skip to the following page boundary instead. */
	       if (new_size != size && (ULONG)block_boundary - (ULONG)block < sizeof(BLOCK_HDR))
		    new_size += PAGE_SIZE;
	       if (current->Size >= new_size &&
		   (best == NULL || current->Size < best->Size))
	       current_entry = current_entry->Flink;
static void* take_block(BLOCK_HDR* current, unsigned int size,
			ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocate a used block of least 'size' from the specified
 * free block
 * RETURNS: The address of the created memory block
 * NOTE(review): the declarations of blk/free_blk, the assignments of
 * current->Tag and the size fields, several braces and some statistics
 * updates are missing from this extraction.  "&curren;t" mojibake was
 * restored to "&current".
 */
     /* Page-sized requests must come out page aligned: split off the
      * leading sub-page fragment as its own free block first. */
     if (size >= PAGE_SIZE)
	  blk = address_to_block((PVOID)PAGE_ROUND_UP(block_to_address (current)));
	  /* Too little room before the boundary for a header — move the
	   * allocation up one full page. */
	  if ((ULONG)blk - (ULONG)current < sizeof(BLOCK_HDR))
	       /* NOTE: cast-as-lvalue is a GCC extension. */
	       (ULONG)blk += PAGE_SIZE;
	  assert((ULONG)blk - (ULONG)current + size <= current->Size && (ULONG)blk - (ULONG)current >= sizeof(BLOCK_HDR));
	  /* Build the new (aligned) free block after the fragment and link
	   * it right after 'current' to keep the list address-sorted. */
	  memset(blk, 0, sizeof(BLOCK_HDR));
	  blk->Magic = BLOCK_HDR_FREE_MAGIC;
	  blk->Size = current->Size - ((ULONG)blk - (ULONG)current);
	  current->Size -= (blk->Size + sizeof(BLOCK_HDR));
	  InsertHeadList(&current->ListEntry, &blk->ListEntry);
	  /* One header's worth of space is consumed by the split. */
	  EiFreeNonPagedPool -= sizeof(BLOCK_HDR);

     /*
      * If the block is much bigger than required then split it and
      * return a pointer to the allocated section. If the difference
      * between the sizes is marginal it makes no sense to have the
      * extra overhead.
      */
     if (current->Size > size + sizeof(BLOCK_HDR))
	  EiFreeNonPagedPool -= current->Size;

	  /*
	   * Replace the bigger block with a smaller block in the
	   * same position in the list
	   */
	  free_blk = (BLOCK_HDR *)(((int)current)
				   + sizeof(BLOCK_HDR) + size);
	  free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
	  InsertHeadList(&current->ListEntry, &free_blk->ListEntry);
	  free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);

	  /* Move the now right-sized 'current' over to the used list. */
	  RemoveEntryList(&current->ListEntry);
	  add_to_used_list(current);
	  current->Magic = BLOCK_HDR_USED_MAGIC;
	  current->Caller = Caller;
	  current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
	  MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

	  EiFreeNonPagedPool += free_blk->Size;

	  return(block_to_address(current));

     /*
      * Otherwise allocate the whole block
      */
     remove_from_free_list(current);
     add_to_used_list(current);

     current->Magic = BLOCK_HDR_USED_MAGIC;
     current->Caller = Caller;
     current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
     MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

     return(block_to_address(current));
static void* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
 * by reserving a fresh region, backing it with physical pages, seeding it
 * as one free block and immediately carving the requested allocation out
 * of it.
 * NOTE(review): declarations (start, i, Status, blk, block, oldIrql), the
 * page-alignment adjustment for size >= PAGE_SIZE, the failure-path
 * bodies, the Page argument to MmCreateVirtualMapping, assertions and the
 * final "return block;" are missing from this extraction.
 */
     /* Enough pages for the payload plus one header. */
     ULONG nr_pages = PAGE_ROUND_UP(size + sizeof(BLOCK_HDR)) / PAGE_SIZE;

     if (size >= PAGE_SIZE)
     /* Reserve virtual space inside the pool region. */
     start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);

     DPRINT("growing heap for block size %d, ",size);
     DPRINT("start %x\n",start);

     /* Back every reserved page with physical memory. */
     for (i=0;i<nr_pages;i++)
	  PHYSICAL_ADDRESS Page;
	  /* FIXME: Check whether we can really wait here. */
	  Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
	  if (!NT_SUCCESS(Status))
	  Status = MmCreateVirtualMapping(NULL,
					  (PVOID)(start + (i*PAGE_SIZE)),
					  PAGE_READWRITE|PAGE_SYSTEM,
	  if (!NT_SUCCESS(Status))
	       DbgPrint("Unable to create virtual mapping\n");

     /* Turn the whole new region into a single free block, poisoned with
      * 0xcc so use of uninitialised pool memory is visible. */
     blk = (struct _BLOCK_HDR *)start;
     memset(blk, 0, sizeof(BLOCK_HDR));
     blk->Size = (nr_pages * PAGE_SIZE) - sizeof(BLOCK_HDR);
     blk->Magic = BLOCK_HDR_FREE_MAGIC;
     memset(block_to_address(blk), 0xcc, blk->Size);

     KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
     add_to_free_list(blk);
     merge_free_block(blk);

     /* Re-run the search now that the pool has grown, then allocate. */
     blk = lookup_block(size);
     block = take_block(blk, size, Tag, Caller);
     KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#endif /* not WHOLE_PAGE_ALLOCATIONS */
VOID STDCALL
ExFreeNonPagedPool (PVOID block)
/*
 * FUNCTION: Releases previously allocated memory
 * ARGUMENTS:
 *         block = block to free
 * NOTE(review): declarations of oldIrql (and blk in the whole-page
 * branch), NULL-pointer guards, braces, the KeBugCheck calls after the
 * bad-magic diagnostics and the blk->Magic argument of the last DbgPrint
 * are missing from this extraction.
 */
#ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
     DPRINT("freeing block %x\n",blk);

     POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
		((PULONG)&block)[-1]);

     KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
     ExFreeWholePageBlock(block);
     KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#else /* not WHOLE_PAGE_ALLOCATIONS */
     /* Recover the header that sits just before the user pointer. */
     BLOCK_HDR* blk=address_to_block(block);

     DPRINT("freeing block %x\n",blk);

     POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
		((PULONG)&block)[-1]);

     KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

     /* Reject double frees and frees of addresses never allocated here. */
     if (blk->Magic != BLOCK_HDR_USED_MAGIC)
	  if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
	       DbgPrint("ExFreePool of already freed address %x\n", block);
	  DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",

     /* Poison the payload so use-after-free is visible. */
     memset(block, 0xcc, blk->Size);

#ifdef TAG_STATISTICS_TRACKING
     MiRemoveFromTagHashTable(blk);
#endif /* TAG_STATISTICS_TRACKING */
     remove_from_used_list(blk);
     blk->Magic = BLOCK_HDR_FREE_MAGIC;
     blk->tag_next = NULL;
     add_to_free_list(blk);
     merge_free_block(blk);

     KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#endif /* WHOLE_PAGE_ALLOCATIONS */
/* NOTE(review): the return type (PVOID), declarations of block/oldIrql,
 * the Size == 0 guard conditions, braces, the "if (best == NULL)" test
 * before growing the pool and the "return block;" statements are missing
 * from this extraction. */
ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
/*
 * Central nonpaged-pool allocator: returns a zeroed, dword-aligned block
 * of at least Size bytes tagged with Tag, recording Caller for debugging.
 */
#ifdef WHOLE_PAGE_ALLOCATIONS
     POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",

     KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

     /* Size == 0 yields NULL to
      * accomodate this useful idiom. */
	  POOL_TRACE("= NULL\n");
	  KeReleaseSpinLock(&MmNpoolLock, oldIrql);

     block = ExAllocateWholePageBlock(Size);
     KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#else /* not WHOLE_PAGE_ALLOCATIONS */
     BLOCK_HDR* best = NULL;

     POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",

     KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

     /* after some allocations print the npaged pool stats */
#ifdef TAG_STATISTICS_TRACKING
	  static ULONG counter = 0;
	  if (counter++ % 100000 == 0)
	       MiDebugDumpNonPagedPoolStats(FALSE);

     /* Size == 0 yields NULL to
      * accomodate this useful idiom */
	  POOL_TRACE("= NULL\n");
	  KeReleaseSpinLock(&MmNpoolLock, oldIrql);

     /* Make the size dword alligned, this makes the block dword alligned */
     Size = ROUND_UP(Size, 4);

     /*
      * Look for an already created block of sufficent size
      */
     best = lookup_block(Size);
	  /* No fit on the free list: release the lock (grow_kernel_pool
	   * re-acquires it) and extend the pool. */
	  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
	  block = grow_kernel_pool(Size, Tag, Caller);
	  assert(block != NULL);
	  memset(block,0,Size);
	  block=take_block(best, Size, Tag, Caller);
	  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
	  memset(block,0,Size);
#endif /* WHOLE_PAGE_ALLOCATIONS */
#ifdef WHOLE_PAGE_ALLOCATIONS

/* NOTE(review): the return type/linkage, declarations of Address, Size,
 * NrPages and i, the allocation-failure handling inside the loop and the
 * trailing arguments of MmCreateVirtualMapping are missing from this
 * extraction. */
ExAllocateWholePageBlock(ULONG UserSize)
/*
 * Debug allocator: give each allocation its own page range, positioning
 * the user block flush against the end of the mapped range so any overrun
 * hits the extra, unmapped guard page reserved after it.  The page count
 * is stashed in the ULONG immediately before the user block for
 * ExFreeWholePageBlock.
 */
     PHYSICAL_ADDRESS Page;

     /* Space for the user data plus the hidden page-count prefix. */
     Size = sizeof(ULONG) + UserSize;
     NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;

     /* +1 reserves an unmapped guard page after the block. */
     Address = MiAllocNonPagedPoolRegion(NrPages + 1);

     for (i = 0; i < NrPages; i++)
	  Page = MmAllocPage(MC_NPPOOL, 0);
	  if (Page.QuadPart == 0LL)
	  MmCreateVirtualMapping(NULL,
				 Address + (i * PAGE_SIZE),
				 PAGE_READWRITE | PAGE_SYSTEM,

     /* Record the page count just before the user block, then hand back a
      * pointer placed so the block ends exactly at the mapping's end. */
     *((PULONG)((ULONG)Address + (NrPages * PAGE_SIZE) - Size)) = NrPages;
     return((PVOID)((ULONG)Address + (NrPages * PAGE_SIZE) - UserSize));
/* NOTE(review): the return type/linkage, the declaration of NrPages, the
 * KeBugCheck after the out-of-range diagnostic and the closing brace are
 * missing from this extraction. */
ExFreeWholePageBlock(PVOID Addr)
/*
 * Free a block obtained from ExAllocateWholePageBlock: validate that the
 * address lies inside the pool, read back the page count stored just
 * before the user block, and release the whole page range.
 */
     if ((ULONG)Addr < kernel_pool_base ||
	 (ULONG)Addr >= (kernel_pool_base + NONPAGED_POOL_SIZE))
	  DbgPrint("Block %x found outside pool area\n", Addr);
     /* The allocator stashed the page count in the ULONG preceding the
      * user pointer. */
     NrPages = *(PULONG)((ULONG)Addr - sizeof(ULONG));
     MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG)Addr), NrPages, TRUE);

#endif /* WHOLE_PAGE_ALLOCATIONS */