1 /* $Id: npool.c,v 1.60 2002/09/07 15:13:00 chorns Exp $
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
17 /* INCLUDES ****************************************************************/
22 #include <internal/debug.h>
24 /* Enable strict checking of the nonpaged pool on every allocation */
25 //#define ENABLE_VALIDATE_POOL
27 /* Enable tracking of statistics about the tagged blocks in the pool */
28 #define TAG_STATISTICS_TRACKING
31 * Put each block in its own range of pages and position the block at the
32 * end of the range so any accesses beyond the end of block are to invalid
35 //#define WHOLE_PAGE_ALLOCATIONS
/* NOTE(review): reconstructed from a garbled extraction; the #else/#endif
 * arms were missing — verify against the original npool.c. */
#ifdef ENABLE_VALIDATE_POOL
/* Run a full consistency check of the pool on each operation (debug only). */
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
/* Trace every pool allocation/free to the kernel debugger. */
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
#else
#define POOL_TRACE(args...)
#endif
49 /* TYPES *******************************************************************/
51 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
52 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
55 * fields present at the start of a block (this is for internal use only)
57 typedef struct _BLOCK_HDR
64 struct _BLOCK_HDR
* tag_next
;
69 ExAllocateWholePageBlock(ULONG Size
);
71 ExFreeWholePageBlock(PVOID Addr
);
73 /* GLOBALS *****************************************************************/
76 * Memory managment initalized symbol for the base of the pool
78 static unsigned int kernel_pool_base
= 0;
81 * Head of the list of free blocks
83 static LIST_ENTRY FreeBlockListHead
;
86 * Head of the list of in use block
88 static LIST_ENTRY UsedBlockListHead
;
90 #ifndef WHOLE_PAGE_ALLOCATIONS
92 * Count of free blocks
94 static ULONG EiNrFreeBlocks
= 0;
97 * Count of used blocks
99 static ULONG EiNrUsedBlocks
= 0;
103 * Lock that protects the non-paged pool data structures
105 static KSPIN_LOCK MmNpoolLock
;
108 * Total memory used for free nonpaged pool blocks
110 ULONG EiFreeNonPagedPool
= 0;
113 * Total memory used for nonpaged pool blocks
115 ULONG EiUsedNonPagedPool
= 0;
118 * Allocate a range of memory in the nonpaged pool
121 MiAllocNonPagedPoolRegion(unsigned int nr_pages
);
124 MiFreeNonPagedPoolRegion(PVOID Addr
, ULONG Count
, BOOLEAN Free
);
126 #ifdef TAG_STATISTICS_TRACKING
127 #define TAG_HASH_TABLE_SIZE (1024)
128 static BLOCK_HDR
* tag_hash_table
[TAG_HASH_TABLE_SIZE
];
129 #endif /* TAG_STATISTICS_TRACKING */
131 /* FUNCTIONS ***************************************************************/
133 #ifdef TAG_STATISTICS_TRACKING
135 MiRemoveFromTagHashTable(BLOCK_HDR
* block
)
137 * Remove a block from the tag hash table
149 hash
= block
->Tag
% TAG_HASH_TABLE_SIZE
;
152 current
= tag_hash_table
[hash
];
153 while (current
!= NULL
)
155 if (current
== block
)
157 if (previous
== NULL
)
159 tag_hash_table
[hash
] = block
->tag_next
;
163 previous
->tag_next
= block
->tag_next
;
168 current
= current
->tag_next
;
170 DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
171 block
->Tag
, block
->Caller
);
176 MiAddToTagHashTable(BLOCK_HDR
* block
)
178 * Add a block to the tag hash table
190 hash
= block
->Tag
% TAG_HASH_TABLE_SIZE
;
193 current
= tag_hash_table
[hash
];
194 while (current
!= NULL
)
196 if (current
->Tag
== block
->Tag
)
198 block
->tag_next
= current
->tag_next
;
199 current
->tag_next
= block
;
203 current
= current
->tag_next
;
205 block
->tag_next
= NULL
;
206 if (previous
== NULL
)
208 tag_hash_table
[hash
] = block
;
212 previous
->tag_next
= block
;
215 #endif /* TAG_STATISTICS_TRACKING */
218 ExInitNonPagedPool(ULONG BaseAddress
)
220 kernel_pool_base
= BaseAddress
;
221 KeInitializeSpinLock(&MmNpoolLock
);
222 MmInitKernelMap((PVOID
)BaseAddress
);
223 memset(tag_hash_table
, 0, sizeof(tag_hash_table
));
224 InitializeListHead(&FreeBlockListHead
);
225 InitializeListHead(&UsedBlockListHead
);
228 #ifdef TAG_STATISTICS_TRACKING
230 MiDumpTagStats(ULONG CurrentTag
, ULONG CurrentNrBlocks
, ULONG CurrentSize
)
234 c1
= (CurrentTag
>> 24) & 0xFF;
235 c2
= (CurrentTag
>> 16) & 0xFF;
236 c3
= (CurrentTag
>> 8) & 0xFF;
237 c4
= CurrentTag
& 0xFF;
239 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
241 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
242 CurrentTag
, c4
, c3
, c2
, c1
, CurrentNrBlocks
,
243 CurrentSize
, CurrentSize
/ CurrentNrBlocks
);
247 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
248 CurrentTag
, CurrentNrBlocks
, CurrentSize
,
249 CurrentSize
/ CurrentNrBlocks
);
252 #endif /* TAG_STATISTICS_TRACKING */
255 MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly
)
257 #ifdef TAG_STATISTICS_TRACKING
261 ULONG CurrentNrBlocks
;
266 DbgPrint("******* Dumping non paging pool stats ******\n");
269 for (i
= 0; i
< TAG_HASH_TABLE_SIZE
; i
++)
274 current
= tag_hash_table
[i
];
275 while (current
!= NULL
)
277 if (current
->Tag
!= CurrentTag
)
279 if (CurrentTag
!= 0 && CurrentNrBlocks
!= 0)
281 MiDumpTagStats(CurrentTag
, CurrentNrBlocks
, CurrentSize
);
283 CurrentTag
= current
->Tag
;
288 if (!NewOnly
|| !current
->Dumped
)
292 CurrentSize
= CurrentSize
+ current
->Size
;
293 TotalSize
= TotalSize
+ current
->Size
;
294 current
->Dumped
= TRUE
;
296 current
= current
->tag_next
;
298 if (CurrentTag
!= 0 && CurrentNrBlocks
!= 0)
300 MiDumpTagStats(CurrentTag
, CurrentNrBlocks
, CurrentSize
);
303 if (TotalBlocks
!= 0)
305 DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
306 TotalBlocks
, TotalSize
, TotalSize
/ TotalBlocks
);
310 DbgPrint("TotalBlocks %d TotalSize %d\n",
311 TotalBlocks
, TotalSize
);
313 DbgPrint("***************** Dump Complete ***************\n");
314 #endif /* TAG_STATISTICS_TRACKING */
318 MiDebugDumpNonPagedPool(BOOLEAN NewOnly
)
321 PLIST_ENTRY current_entry
;
324 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
326 DbgPrint("******* Dumping non paging pool contents ******\n");
327 current_entry
= UsedBlockListHead
.Flink
;
328 while (current_entry
!= &UsedBlockListHead
)
330 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
331 if (!NewOnly
|| !current
->Dumped
)
335 c1
= (current
->Tag
>> 24) & 0xFF;
336 c2
= (current
->Tag
>> 16) & 0xFF;
337 c3
= (current
->Tag
>> 8) & 0xFF;
338 c4
= current
->Tag
& 0xFF;
340 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
342 DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
343 current
->Size
, current
->Tag
, c4
, c3
, c2
, c1
,
348 DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
349 current
->Size
, current
->Tag
, current
->Caller
);
351 current
->Dumped
= TRUE
;
353 current_entry
= current_entry
->Flink
;
355 DbgPrint("***************** Dump Complete ***************\n");
356 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
359 #ifndef WHOLE_PAGE_ALLOCATIONS
361 #ifdef ENABLE_VALIDATE_POOL
362 static void validate_free_list(void)
364 * FUNCTION: Validate the integrity of the list of free blocks
368 PLIST_ENTRY current_entry
;
369 unsigned int blocks_seen
=0;
371 current_entry
= FreeBlockListHead
.Flink
;
372 while (current_entry
!= &FreeBlockListHead
)
374 unsigned int base_addr
;
376 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
377 base_addr
= (int)current
;
379 if (current
->Magic
!= BLOCK_HDR_FREE_MAGIC
)
381 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
383 KeBugCheck(0); //(KBUG_POOL_FREE_LIST_CORRUPT);
386 if (base_addr
< (kernel_pool_base
) ||
387 (base_addr
+current
->Size
) > (kernel_pool_base
)+NONPAGED_POOL_SIZE
)
389 DbgPrint("Block %x found outside pool area\n",current
);
390 DbgPrint("Size %d\n",current
->Size
);
391 DbgPrint("Limits are %x %x\n",kernel_pool_base
,
392 kernel_pool_base
+NONPAGED_POOL_SIZE
);
393 KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
396 if (blocks_seen
> EiNrFreeBlocks
)
398 DbgPrint("Too many blocks on free list\n");
399 KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
401 if (current
->ListEntry
.Flink
!= &FreeBlockListHead
&&
402 current
->ListEntry
.Flink
->Blink
!= ¤t
->ListEntry
)
404 DbgPrint("%s:%d:Break in list (current %x next %x "
405 "current->next->previous %x)\n",
406 __FILE__
,__LINE__
,current
, current
->ListEntry
.Flink
,
407 current
->ListEntry
.Flink
->Blink
);
408 KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
411 current_entry
= current_entry
->Flink
;
415 static void validate_used_list(void)
417 * FUNCTION: Validate the integrity of the list of used blocks
421 PLIST_ENTRY current_entry
;
422 unsigned int blocks_seen
=0;
424 current_entry
= UsedBlockListHead
.Flink
;
425 while (current_entry
!= &UsedBlockListHead
)
427 unsigned int base_addr
;
429 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
430 base_addr
= (int)current
;
432 if (current
->Magic
!= BLOCK_HDR_USED_MAGIC
)
434 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
436 KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
438 if (base_addr
< (kernel_pool_base
) ||
439 (base_addr
+current
->Size
) >
440 (kernel_pool_base
)+NONPAGED_POOL_SIZE
)
442 DbgPrint("Block %x found outside pool area\n",current
);
443 KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
446 if (blocks_seen
> EiNrUsedBlocks
)
448 DbgPrint("Too many blocks on used list\n");
449 KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
451 if (current
->ListEntry
.Flink
!= &UsedBlockListHead
&&
452 current
->ListEntry
.Flink
->Blink
!= ¤t
->ListEntry
)
454 DbgPrint("Break in list (current %x next %x)\n",
455 current
, current
->ListEntry
.Flink
);
456 KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
459 current_entry
= current_entry
->Flink
;
463 static void check_duplicates(BLOCK_HDR
* blk
)
465 * FUNCTION: Check a block has no duplicates
467 * blk = block to check
468 * NOTE: Bug checks if duplicates are found
471 unsigned int base
= (int)blk
;
472 unsigned int last
= ((int)blk
) + +sizeof(BLOCK_HDR
) + blk
->Size
;
474 PLIST_ENTRY current_entry
;
476 current_entry
= FreeBlockListHead
.Flink
;
477 while (current_entry
!= &FreeBlockListHead
)
479 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
481 if (current
->Magic
!= BLOCK_HDR_FREE_MAGIC
)
483 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
485 KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
488 if ( (int)current
> base
&& (int)current
< last
)
490 DbgPrint("intersecting blocks on list\n");
493 if ( (int)current
< base
&&
494 ((int)current
+ current
->Size
+ sizeof(BLOCK_HDR
))
497 DbgPrint("intersecting blocks on list\n");
501 current_entry
= current_entry
->Flink
;
504 current_entry
= UsedBlockListHead
.Flink
;
505 while (current_entry
!= &UsedBlockListHead
)
507 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
509 if ( (int)current
> base
&& (int)current
< last
)
511 DbgPrint("intersecting blocks on list\n");
514 if ( (int)current
< base
&&
515 ((int)current
+ current
->Size
+ sizeof(BLOCK_HDR
))
518 DbgPrint("intersecting blocks on list\n");
522 current_entry
= current_entry
->Flink
;
527 static void validate_kernel_pool(void)
529 * FUNCTION: Checks the integrity of the kernel memory heap
533 PLIST_ENTRY current_entry
;
535 validate_free_list();
536 validate_used_list();
538 current_entry
= FreeBlockListHead
.Flink
;
539 while (current_entry
!= &FreeBlockListHead
)
541 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
542 check_duplicates(current
);
543 current_entry
= current_entry
->Flink
;
545 current_entry
= UsedBlockListHead
.Flink
;
546 while (current_entry
!= &UsedBlockListHead
)
548 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
549 check_duplicates(current
);
550 current_entry
= current_entry
->Flink
;
/* NOTE(review): this fragment is garbled and the body of free_pages after
 * the early-out test is entirely missing from the extraction; presumably it
 * released the whole pages spanned by the block — recover from repository
 * history before rebuilding.  Left byte-identical below. */
557 free_pages(BLOCK_HDR
* blk
)
/* end = one past the last byte of the block (header + payload). */
564 end
= (ULONG
)blk
+ sizeof(BLOCK_HDR
) + blk
->Size
;
567 * If the block doesn't contain a whole page then there is nothing to do
569 if (PAGE_ROUND_UP(start
) >= PAGE_ROUND_DOWN(end
))
577 merge_free_block(BLOCK_HDR
* blk
)
579 PLIST_ENTRY next_entry
;
581 PLIST_ENTRY previous_entry
;
584 next_entry
= blk
->ListEntry
.Flink
;
585 if (next_entry
!= &FreeBlockListHead
)
587 next
= CONTAINING_RECORD(next_entry
, BLOCK_HDR
, ListEntry
);
588 if (((unsigned int)blk
+ sizeof(BLOCK_HDR
) + blk
->Size
) ==
591 RemoveEntryList(&next
->ListEntry
);
592 blk
->Size
= blk
->Size
+ sizeof(BLOCK_HDR
) + next
->Size
;
597 previous_entry
= blk
->ListEntry
.Blink
;
598 if (previous_entry
!= &FreeBlockListHead
)
600 previous
= CONTAINING_RECORD(previous_entry
, BLOCK_HDR
, ListEntry
);
601 if (((unsigned int)previous
+ sizeof(BLOCK_HDR
) + previous
->Size
) ==
604 RemoveEntryList(&blk
->ListEntry
);
605 previous
->Size
= previous
->Size
+ sizeof(BLOCK_HDR
) + blk
->Size
;
612 add_to_free_list(BLOCK_HDR
* blk
)
614 * FUNCTION: add the block to the free list (internal)
617 PLIST_ENTRY current_entry
;
620 current_entry
= FreeBlockListHead
.Flink
;
621 while (current_entry
!= &FreeBlockListHead
)
623 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
625 if ((unsigned int)current
> (unsigned int)blk
)
627 blk
->ListEntry
.Flink
= current_entry
;
628 blk
->ListEntry
.Blink
= current_entry
->Blink
;
629 current_entry
->Blink
->Flink
= &blk
->ListEntry
;
630 current_entry
->Blink
= &blk
->ListEntry
;
635 current_entry
= current_entry
->Flink
;
637 InsertTailList(&FreeBlockListHead
, &blk
->ListEntry
);
641 static void add_to_used_list(BLOCK_HDR
* blk
)
643 * FUNCTION: add the block to the used list (internal)
646 InsertHeadList(&UsedBlockListHead
, &blk
->ListEntry
);
651 static void remove_from_free_list(BLOCK_HDR
* current
)
653 RemoveEntryList(¤t
->ListEntry
);
658 static void remove_from_used_list(BLOCK_HDR
* current
)
660 RemoveEntryList(¤t
->ListEntry
);
665 inline static void* block_to_address(BLOCK_HDR
* blk
)
667 * FUNCTION: Translate a block header address to the corresponding block
671 return ( (void *) ((int)blk
+ sizeof(BLOCK_HDR
)) );
674 inline static BLOCK_HDR
* address_to_block(void* addr
)
677 ( ((int)addr
) - sizeof(BLOCK_HDR
) );
680 static BLOCK_HDR
* grow_kernel_pool(unsigned int size
, ULONG Tag
, PVOID Caller
)
682 * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
686 unsigned int total_size
= size
+ sizeof(BLOCK_HDR
);
687 unsigned int nr_pages
= PAGE_ROUND_UP(total_size
) / PAGE_SIZE
;
689 BLOCK_HDR
* used_blk
=NULL
;
690 BLOCK_HDR
* free_blk
=NULL
;
695 start
= (ULONG
)MiAllocNonPagedPoolRegion(nr_pages
);
697 DPRINT("growing heap for block size %d, ",size
);
698 DPRINT("start %x\n",start
);
700 for (i
=0;i
<nr_pages
;i
++)
702 PHYSICAL_ADDRESS Page
;
703 /* FIXME: Check whether we can really wait here. */
704 Status
= MmRequestPageMemoryConsumer(MC_NPPOOL
, TRUE
, &Page
);
705 if (!NT_SUCCESS(Status
))
710 Status
= MmCreateVirtualMapping(NULL
,
711 (PVOID
)(start
+ (i
*PAGE_SIZE
)),
715 if (!NT_SUCCESS(Status
))
717 DbgPrint("Unable to create virtual mapping\n");
722 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
723 if ((PAGE_SIZE
-(total_size
%PAGE_SIZE
))>(2*sizeof(BLOCK_HDR
)))
725 used_blk
= (struct _BLOCK_HDR
*)start
;
726 DPRINT("Creating block at %x\n",start
);
727 used_blk
->Magic
= BLOCK_HDR_USED_MAGIC
;
728 used_blk
->Size
= size
;
729 add_to_used_list(used_blk
);
731 free_blk
= (BLOCK_HDR
*)(start
+ sizeof(BLOCK_HDR
) + size
);
732 DPRINT("Creating block at %x\n",free_blk
);
733 free_blk
->Magic
= BLOCK_HDR_FREE_MAGIC
;
734 free_blk
->Size
= (nr_pages
* PAGE_SIZE
) -((sizeof(BLOCK_HDR
)*2) + size
);
735 add_to_free_list(free_blk
);
737 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ free_blk
->Size
;
738 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ used_blk
->Size
;
742 used_blk
= (struct _BLOCK_HDR
*)start
;
743 used_blk
->Magic
= BLOCK_HDR_USED_MAGIC
;
744 used_blk
->Size
= (nr_pages
* PAGE_SIZE
) - sizeof(BLOCK_HDR
);
745 add_to_used_list(used_blk
);
747 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ used_blk
->Size
;
751 used_blk
->Caller
= Caller
;
752 used_blk
->Dumped
= FALSE
;
753 #ifdef TAG_STATISTICS_TRACKING
754 MiAddToTagHashTable(used_blk
);
755 #endif /* TAG_STATISTICS_TRACKING */
758 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
762 static void* take_block(BLOCK_HDR
* current
, unsigned int size
,
763 ULONG Tag
, PVOID Caller
)
765 * FUNCTION: Allocate a used block of least 'size' from the specified
767 * RETURNS: The address of the created memory block
771 * If the block is much bigger than required then split it and
772 * return a pointer to the allocated section. If the difference
773 * between the sizes is marginal it makes no sense to have the
776 if (current
->Size
> (1 + size
+ sizeof(BLOCK_HDR
)))
780 EiFreeNonPagedPool
= EiFreeNonPagedPool
- current
->Size
;
783 * Replace the bigger block with a smaller block in the
784 * same position in the list
786 free_blk
= (BLOCK_HDR
*)(((int)current
)
787 + sizeof(BLOCK_HDR
) + size
);
788 free_blk
->Magic
= BLOCK_HDR_FREE_MAGIC
;
789 InsertHeadList(¤t
->ListEntry
, &free_blk
->ListEntry
);
790 free_blk
->Size
= current
->Size
- (sizeof(BLOCK_HDR
) + size
);
793 RemoveEntryList(¤t
->ListEntry
);
794 InsertHeadList(&UsedBlockListHead
, ¤t
->ListEntry
);
796 current
->Magic
= BLOCK_HDR_USED_MAGIC
;
798 current
->Caller
= Caller
;
799 current
->Dumped
= FALSE
;
800 #ifdef TAG_STATISTICS_TRACKING
801 MiAddToTagHashTable(current
);
802 #endif /* TAG_STATISTICS_TRACKING */
804 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ current
->Size
;
805 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ free_blk
->Size
;
808 return(block_to_address(current
));
812 * Otherwise allocate the whole block
814 remove_from_free_list(current
);
815 add_to_used_list(current
);
817 EiFreeNonPagedPool
= EiFreeNonPagedPool
- current
->Size
;
818 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ current
->Size
;
820 current
->Magic
= BLOCK_HDR_USED_MAGIC
;
822 current
->Caller
= Caller
;
823 current
->Dumped
= FALSE
;
824 #ifdef TAG_STATISTICS_TRACKING
825 MiAddToTagHashTable(current
);
826 #endif /* TAG_STATISTICS_TRACKING */
829 return(block_to_address(current
));
832 #endif /* not WHOLE_PAGE_ALLOCATIONS */
834 VOID STDCALL
ExFreeNonPagedPool (PVOID block
)
836 * FUNCTION: Releases previously allocated memory
838 * block = block to free
841 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
849 DPRINT("freeing block %x\n",blk
);
851 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block
,blk
->size
,
852 ((PULONG
)&block
)[-1]);
854 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
856 ExFreeWholePageBlock(block
);
857 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
859 #else /* not WHOLE_PAGE_ALLOCATIONS */
861 BLOCK_HDR
* blk
=address_to_block(block
);
869 DPRINT("freeing block %x\n",blk
);
871 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block
,blk
->size
,
872 ((PULONG
)&block
)[-1]);
874 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
878 if (blk
->Magic
!= BLOCK_HDR_USED_MAGIC
)
880 if (blk
->Magic
== BLOCK_HDR_FREE_MAGIC
)
882 DbgPrint("ExFreePool of already freed address %x\n", block
);
886 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
893 memset(block
, 0xcc, blk
->Size
);
895 #ifdef TAG_STATISTICS_TRACKING
896 MiRemoveFromTagHashTable(blk
);
897 #endif /* TAG_STATISTICS_TRACKING */
898 remove_from_used_list(blk
);
899 blk
->Magic
= BLOCK_HDR_FREE_MAGIC
;
900 add_to_free_list(blk
);
901 merge_free_block(blk
);
903 EiUsedNonPagedPool
= EiUsedNonPagedPool
- blk
->Size
;
904 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ blk
->Size
;
906 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
908 #endif /* WHOLE_PAGE_ALLOCATIONS */
912 ExAllocateNonPagedPoolWithTag(ULONG Type
,
917 #ifdef WHOLE_PAGE_ALLOCATIONS
921 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
924 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
927 * accomodate this useful idiom
931 POOL_TRACE("= NULL\n");
932 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
936 block
= ExAllocateWholePageBlock(Size
);
937 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
940 #else /* not WHOLE_PAGE_ALLOCATIONS */
941 BLOCK_HDR
* current
= NULL
;
942 PLIST_ENTRY current_entry
;
944 BLOCK_HDR
* best
= NULL
;
947 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
950 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
955 * accomodate this useful idiom
959 POOL_TRACE("= NULL\n");
960 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
965 * Look for an already created block of sufficent size
967 current_entry
= FreeBlockListHead
.Flink
;
968 while (current_entry
!= &FreeBlockListHead
)
970 DPRINT("current %x size %x tag_next %x\n",current
,current
->Size
,
972 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
973 if (current
->Size
>= Size
&&
974 (best
== NULL
|| current
->Size
< best
->Size
))
978 current_entry
= current_entry
->Flink
;
982 block
=take_block(best
, Size
, Tag
, Caller
);
984 memset(block
,0,Size
);
985 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
992 * Otherwise create a new block
994 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
995 block
=block_to_address(grow_kernel_pool(Size
, Tag
, Caller
));
997 memset(block
, 0, Size
);
1000 #endif /* WHOLE_PAGE_ALLOCATIONS */
1003 #ifdef WHOLE_PAGE_ALLOCATIONS
1006 ExAllocateWholePageBlock(ULONG UserSize
)
1009 PHYSICAL_ADDRESS Page
;
1014 Size
= sizeof(ULONG
) + UserSize
;
1015 NrPages
= ROUND_UP(Size
, PAGE_SIZE
) / PAGE_SIZE
;
1017 Address
= MiAllocNonPagedPoolRegion(NrPages
+ 1);
1019 for (i
= 0; i
< NrPages
; i
++)
1021 Page
= MmAllocPage(MC_NPPOOL
, 0);
1022 if (Page
.QuadPart
== 0LL)
1026 MmCreateVirtualMapping(NULL
,
1027 Address
+ (i
* PAGE_SIZE
),
1028 PAGE_READWRITE
| PAGE_SYSTEM
,
1033 *((PULONG
)((ULONG
)Address
+ (NrPages
* PAGE_SIZE
) - Size
)) = NrPages
;
1034 return((PVOID
)((ULONG
)Address
+ (NrPages
* PAGE_SIZE
) - UserSize
));
1038 ExFreeWholePageBlock(PVOID Addr
)
1042 if ((ULONG
)Addr
< kernel_pool_base
||
1043 (ULONG
)Addr
>= (kernel_pool_base
+ NONPAGED_POOL_SIZE
))
1045 DbgPrint("Block %x found outside pool area\n", Addr
);
1048 NrPages
= *(PULONG
)((ULONG
)Addr
- sizeof(ULONG
));
1049 MiFreeNonPagedPoolRegion((PVOID
)PAGE_ROUND_DOWN((ULONG
)Addr
), NrPages
, TRUE
);
1052 #endif /* WHOLE_PAGE_ALLOCATIONS */