1 /* $Id: npool.c,v 1.48 2001/10/29 02:39:38 dwelch Exp $
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
17 /* INCLUDES ****************************************************************/
19 #include <ddk/ntddk.h>
20 #include <internal/mm.h>
21 #include <internal/bitops.h>
22 #include <internal/ntoskrnl.h>
23 #include <internal/pool.h>
26 #include <internal/debug.h>
28 /* Enable strict checking of the nonpaged pool on every allocation */
29 //#define ENABLE_VALIDATE_POOL
31 /* Enable tracking of statistics about the tagged blocks in the pool */
32 #define TAG_STATISTICS_TRACKING
35 * Put each block in its own range of pages and position the block at the
36 * end of the range so any accesses beyond the end of block are to invalid
38 * FIXME: Not implemented yet.
40 /* #define WHOLE_PAGE_ALLOCATIONS */
42 #ifdef ENABLE_VALIDATE_POOL
43 #define VALIDATE_POOL validate_kernel_pool()
49 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
51 #define POOL_TRACE(args...)
/* TYPES *******************************************************************/

/* Magic values stamped into a block header's Magic field so that pool
 * corruption, double frees and frees of non-pool addresses can be detected
 * cheaply (see ExFreePool and the validate_* routines). */
#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)

/*
 * fields present at the start of a block (this is for internal use only)
 *
 * NOTE(review): extraction dropped most of this structure definition.  Code
 * elsewhere in this file also accesses Magic, Size, ListEntry, Tag, Caller
 * and Dumped members in addition to the tag_next pointer visible here --
 * confirm against the complete source.
 */
typedef struct _BLOCK_HDR
   /* Next block with the same pool tag on its tag-statistics hash chain. */
   struct _BLOCK_HDR* tag_next;
;
/* GLOBALS *****************************************************************/

/*
 * Memory management initialized symbol for the base of the pool.
 * Set once by ExInitNonPagedPool and used by the validation code to range
 * check block addresses.
 */
static unsigned int kernel_pool_base = 0;

/*
 * Head of the list of free blocks (add_to_free_list keeps it ordered by
 * ascending block address so neighbouring blocks can be merged).
 */
static LIST_ENTRY FreeBlockListHead;

/*
 * Head of the list of in use blocks (blocks are inserted at the head).
 */
static LIST_ENTRY UsedBlockListHead;

/*
 * Count of free blocks.
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks.
 */
static ULONG EiNrUsedBlocks = 0;

/*
 * Lock that protects the non-paged pool data structures above.
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks (headers excluded).
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks.
 */
ULONG EiUsedNonPagedPool = 0;

/*
 * Allocate a range of memory in the nonpaged pool.
 * NOTE(review): the return type of this forward declaration was lost in
 * extraction; grow_kernel_pool casts the result to ULONG.
 */
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
/* Hash table of per-tag block chains, linked through BLOCK_HDR.tag_next;
 * only compiled in when tag statistics tracking is enabled. */
static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */
126 /* FUNCTIONS ***************************************************************/
#ifdef TAG_STATISTICS_TRACKING

/*
 * Remove a block from the tag hash table.
 *
 * Walks the singly linked tag_next chain of the block's hash bucket and
 * unlinks the block; if the loop completes without finding it, the DPRINT1
 * at the bottom reports the inconsistency.
 *
 * NOTE(review): extraction dropped the return type, the declarations of
 * hash / current / previous, the braces, the "previous = current;" advance
 * and the early return after unlinking -- the surviving fragments are kept
 * verbatim below.
 */
MiRemoveFromTagHashTable(BLOCK_HDR* block)
     /* Bucket index is the raw 32-bit tag modulo the table size. */
     hash = block->Tag % TAG_HASH_TABLE_SIZE;

     current = tag_hash_table[hash];
     while (current != NULL)
          if (current == block)
               if (previous == NULL)
                    /* Block is the bucket head: promote its successor. */
                    tag_hash_table[hash] = block->tag_next;
                    /* Otherwise splice the block out of the middle of the
                     * chain. */
                    previous->tag_next = block->tag_next;
          current = current->tag_next;
     /* Reached only when the block was never on its bucket's chain. */
     DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
             block->Tag, block->Caller);
/*
 * Add a block to the tag hash table.
 *
 * If a block with the same tag already exists in the bucket, the new block
 * is inserted immediately after it; otherwise the block is appended to the
 * end of the bucket's chain (or becomes the bucket head when the chain is
 * empty).
 *
 * NOTE(review): extraction dropped the return type, local declarations,
 * braces, the "previous = current;" advance and the early return after the
 * same-tag insertion -- fragments kept verbatim.
 */
MiAddToTagHashTable(BLOCK_HDR* block)
     hash = block->Tag % TAG_HASH_TABLE_SIZE;

     current = tag_hash_table[hash];
     while (current != NULL)
          if (current->Tag == block->Tag)
               /* Chain the new block directly behind the first block that
                * shares its tag. */
               block->tag_next = current->tag_next;
               current->tag_next = block;
          /* NOTE(review): hard-coded address probe -- this looks like
           * leftover debugging for one specific corruption hunt and should
           * probably be removed. */
          if ((PVOID)current->tag_next >= (PVOID)0xc1123160)
               DbgPrint("previous %x\n", previous);
          current = current->tag_next;
     /* No block with this tag yet: append to the bucket. */
     block->tag_next = NULL;
     if (previous == NULL)
          tag_hash_table[hash] = block;
          previous->tag_next = block;
#endif /* TAG_STATISTICS_TRACKING */
217 ExInitNonPagedPool(ULONG BaseAddress
)
219 kernel_pool_base
= BaseAddress
;
220 KeInitializeSpinLock(&MmNpoolLock
);
221 MmInitKernelMap((PVOID
)BaseAddress
);
222 memset(tag_hash_table
, 0, sizeof(tag_hash_table
));
223 InitializeListHead(&FreeBlockListHead
);
224 InitializeListHead(&UsedBlockListHead
);
#ifdef TAG_STATISTICS_TRACKING

/*
 * Print the statistics collected for a single pool tag.
 *
 * The tag is decoded into its four bytes; if all four are printable the
 * tag is shown both as hex and as characters (least significant byte
 * first), otherwise only the hex form is printed.
 *
 * NOTE(review): both call sites guard against CurrentNrBlocks == 0, so the
 * average-size division is safe.  Extraction dropped the return type, the
 * c1..c4 local declarations, braces and the "else" keyword -- fragments
 * kept verbatim.
 */
MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
     /* Split the 32-bit tag into its four constituent bytes. */
     c1 = (CurrentTag >> 24) & 0xFF;
     c2 = (CurrentTag >> 16) & 0xFF;
     c3 = (CurrentTag >> 8) & 0xFF;
     c4 = CurrentTag & 0xFF;
     if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
          /* Printable tag: show it as characters too (c4..c1 order). */
          DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
                   CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
                   CurrentSize, CurrentSize / CurrentNrBlocks);
          /* Non-printable tag: hex form only. */
          DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
                   CurrentTag, CurrentNrBlocks, CurrentSize,
                   CurrentSize / CurrentNrBlocks);
#endif /* TAG_STATISTICS_TRACKING */
/*
 * Dump per-tag and aggregate statistics for the nonpaged pool.
 *
 * ARGUMENTS:
 *        NewOnly = if TRUE, only accumulate blocks not already flagged as
 *                  Dumped by a previous dump
 *
 * Walks every bucket of the tag hash table; blocks in a bucket are grouped
 * by tag (a bucket's chain keeps equal tags adjacent -- see
 * MiAddToTagHashTable), and MiDumpTagStats is emitted each time the tag
 * changes, plus once for the final run.  A grand total is printed at the
 * end.
 *
 * NOTE(review): extraction dropped the return type, most local declarations
 * (CurrentTag, CurrentSize, TotalBlocks, TotalSize, i, current), braces,
 * "else", and -- apparently -- the CurrentNrBlocks++/TotalBlocks++
 * increments and the per-tag counter resets; confirm against the complete
 * source.  Fragments kept verbatim.
 */
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
#ifdef TAG_STATISTICS_TRACKING
     ULONG CurrentNrBlocks;

     DbgPrint("******* Dumping non paging pool stats ******\n");
     for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
          current = tag_hash_table[i];
          while (current != NULL)
               if (current->Tag != CurrentTag)
                    /* Tag run ended: flush stats for the previous tag. */
                    if (CurrentTag != 0 && CurrentNrBlocks != 0)
                         MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
                    CurrentTag = current->Tag;
               if (!NewOnly || !current->Dumped)
                    /* Accumulate this block into the per-tag and grand
                     * totals and mark it so NewOnly dumps skip it later. */
                    CurrentSize = CurrentSize + current->Size;
                    TotalSize = TotalSize + current->Size;
                    current->Dumped = TRUE;
               current = current->tag_next;
     /* Flush the statistics for the last tag seen. */
     if (CurrentTag != 0 && CurrentNrBlocks != 0)
          MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
     if (TotalBlocks != 0)
          DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
                   TotalBlocks, TotalSize, TotalSize / TotalBlocks);
          /* Zero blocks: print totals without the (undefined) average. */
          DbgPrint("TotalBlocks %d TotalSize %d\n",
                   TotalBlocks, TotalSize);
     DbgPrint("***************** Dump Complete ***************\n");
#endif /* TAG_STATISTICS_TRACKING */
/*
 * Dump every block currently on the used list.
 *
 * ARGUMENTS:
 *        NewOnly = if TRUE, skip blocks already flagged Dumped by a
 *                  previous dump
 *
 * Takes the pool spinlock for the duration of the walk, prints each block's
 * size, tag (with a character rendering when all four tag bytes are
 * printable) and allocator address, and marks each printed block Dumped.
 *
 * NOTE(review): extraction dropped the return type, several local
 * declarations (current, oldIrql, c1..c4), braces, "else", and the trailing
 * current->Caller argument of the first DbgPrint -- fragments kept
 * verbatim.
 */
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
     PLIST_ENTRY current_entry;

     /* Serialise against allocation/free while walking the used list. */
     KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

     DbgPrint("******* Dumping non paging pool contents ******\n");
     current_entry = UsedBlockListHead.Flink;
     while (current_entry != &UsedBlockListHead)
          current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
          if (!NewOnly || !current->Dumped)
               /* Decode the tag into bytes for the printable rendering. */
               c1 = (current->Tag >> 24) & 0xFF;
               c2 = (current->Tag >> 16) & 0xFF;
               c3 = (current->Tag >> 8) & 0xFF;
               c4 = current->Tag & 0xFF;
               if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
                    DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                             current->Size, current->Tag, c4, c3, c2, c1,
                    DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                             current->Size, current->Tag, current->Caller);
               current->Dumped = TRUE;
          current_entry = current_entry->Flink;
     DbgPrint("***************** Dump Complete ***************\n");
     KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#ifdef ENABLE_VALIDATE_POOL
/*
 * FUNCTION: Validate the integrity of the list of free blocks.
 *
 * For every block on the free list, checks: the free-magic stamp, that the
 * block lies wholly inside [kernel_pool_base, kernel_pool_base +
 * NONPAGED_POOL_SIZE), that the walk has not seen more blocks than
 * EiNrFreeBlocks claims exist, and that Flink/Blink cross-link correctly.
 * Any failure bug checks with KBUG_POOL_FREE_LIST_CORRUPT.
 *
 * NOTE(review): extraction dropped the "current" declaration, braces, the
 * blocks_seen++ increment and the argument list tail of the first DbgPrint;
 * fragments kept verbatim ("&current" below was mojibake'd to a currency
 * sign in the extraction).
 */
static void validate_free_list(void)
     PLIST_ENTRY current_entry;
     unsigned int blocks_seen=0;

     current_entry = FreeBlockListHead.Flink;
     while (current_entry != &FreeBlockListHead)
          unsigned int base_addr;

          current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
          base_addr = (int)current;

          /* A free block must carry the free magic stamp. */
          if (current->Magic != BLOCK_HDR_FREE_MAGIC)
               DbgPrint("Bad block magic (probable pool corruption) at %x\n",
               KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

          /* Block must lie entirely inside the nonpaged pool region. */
          if (base_addr < (kernel_pool_base) ||
              (base_addr+current->Size) > (kernel_pool_base)+NONPAGED_POOL_SIZE)
               DbgPrint("Block %x found outside pool area\n",current);
               DbgPrint("Size %d\n",current->Size);
               DbgPrint("Limits are %x %x\n",kernel_pool_base,
                        kernel_pool_base+NONPAGED_POOL_SIZE);
               KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

          /* More nodes than the counter says -> probably a cycle. */
          if (blocks_seen > EiNrFreeBlocks)
               DbgPrint("Too many blocks on free list\n");
               KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

          /* Forward/backward links of adjacent nodes must agree. */
          if (current->ListEntry.Flink != &FreeBlockListHead &&
              current->ListEntry.Flink->Blink != &current->ListEntry)
               DbgPrint("%s:%d:Break in list (current %x next %x "
                        "current->next->previous %x)\n",
                        __FILE__,__LINE__,current, current->ListEntry.Flink,
                        current->ListEntry.Flink->Blink);
               KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

          current_entry = current_entry->Flink;
/*
 * FUNCTION: Validate the integrity of the list of used blocks.
 *
 * Mirror of validate_free_list for the used list: checks the used-magic
 * stamp, pool-range containment, the EiNrUsedBlocks upper bound and
 * Flink/Blink consistency.  Unlike the free-list variant, the range,
 * count and link failures here only DbgPrint (no bug check visible).
 *
 * NOTE(review): extraction dropped the "current" declaration, braces, the
 * blocks_seen++ increment and the tail of the bad-magic DbgPrint argument
 * list; fragments kept verbatim ("&current" was mojibake'd in extraction).
 */
static void validate_used_list(void)
     PLIST_ENTRY current_entry;
     unsigned int blocks_seen=0;

     current_entry = UsedBlockListHead.Flink;
     while (current_entry != &UsedBlockListHead)
          unsigned int base_addr;

          current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
          base_addr = (int)current;

          /* A used block must carry the used magic stamp. */
          if (current->Magic != BLOCK_HDR_USED_MAGIC)
               DbgPrint("Bad block magic (probable pool corruption) at %x\n",
               KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

          /* Block must lie entirely inside the nonpaged pool region. */
          if (base_addr < (kernel_pool_base) ||
              (base_addr+current->Size) >
              (kernel_pool_base)+NONPAGED_POOL_SIZE)
               DbgPrint("Block %x found outside pool area\n",current);

          /* More nodes than the counter says -> probably a cycle. */
          if (blocks_seen > EiNrUsedBlocks)
               DbgPrint("Too many blocks on used list\n");

          /* Forward/backward links of adjacent nodes must agree. */
          if (current->ListEntry.Flink != &UsedBlockListHead &&
              current->ListEntry.Flink->Blink != &current->ListEntry)
               DbgPrint("Break in list (current %x next %x)\n",
                        current, current->ListEntry.Flink);

          current_entry = current_entry->Flink;
/*
 * FUNCTION: Check a block has no duplicates.
 * ARGUMENTS:
 *        blk = block to check
 * NOTE: Bug checks if duplicates are found.
 *
 * Computes the [base, last) byte range covered by blk (header plus data)
 * and walks both the free and used lists looking for any other block whose
 * storage intersects that range.
 *
 * NOTE(review): extraction dropped the "current" declaration, braces, the
 * KeBugCheck calls after the "intersecting blocks" prints, and the right
 * hand side of both backward-overlap comparisons (the fragments below end
 * mid-condition); fragments kept verbatim.  The stray "+ +" before
 * sizeof() appears in the original as well (harmless unary plus).
 */
static void check_duplicates(BLOCK_HDR* blk)
     unsigned int base = (int)blk;
     unsigned int last = ((int)blk) + +sizeof(BLOCK_HDR) + blk->Size;
     PLIST_ENTRY current_entry;

     /* Pass 1: scan the free list for blocks overlapping blk. */
     current_entry = FreeBlockListHead.Flink;
     while (current_entry != &FreeBlockListHead)
          current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

          if (current->Magic != BLOCK_HDR_FREE_MAGIC)
               DbgPrint("Bad block magic (probable pool corruption) at %x\n",
               KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);

          /* current starts inside blk's range. */
          if ( (int)current > base && (int)current < last )
               DbgPrint("intersecting blocks on list\n");
          /* current starts below blk but extends into it. */
          if ( (int)current < base &&
               ((int)current + current->Size + sizeof(BLOCK_HDR))
               DbgPrint("intersecting blocks on list\n");

          current_entry = current_entry->Flink;

     /* Pass 2: same overlap scan over the used list. */
     current_entry = UsedBlockListHead.Flink;
     while (current_entry != &UsedBlockListHead)
          current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

          if ( (int)current > base && (int)current < last )
               DbgPrint("intersecting blocks on list\n");
          if ( (int)current < base &&
               ((int)current + current->Size + sizeof(BLOCK_HDR))
               DbgPrint("intersecting blocks on list\n");

          current_entry = current_entry->Flink;
/*
 * FUNCTION: Checks the integrity of the kernel memory heap.
 *
 * Validates both lists individually, then runs the pairwise overlap check
 * (check_duplicates) for every block on the free list and every block on
 * the used list.  O(n^2) -- debug builds only (ENABLE_VALIDATE_POOL).
 *
 * NOTE(review): extraction dropped the "current" declaration and the
 * braces; fragments kept verbatim.
 */
static void validate_kernel_pool(void)
     PLIST_ENTRY current_entry;

     validate_free_list();
     validate_used_list();

     /* Cross-check every free block against both lists... */
     current_entry = FreeBlockListHead.Flink;
     while (current_entry != &FreeBlockListHead)
          current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
          check_duplicates(current);
          current_entry = current_entry->Flink;
     /* ...and every used block as well. */
     current_entry = UsedBlockListHead.Flink;
     while (current_entry != &UsedBlockListHead)
          current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
          check_duplicates(current);
          current_entry = current_entry->Flink;
/*
 * Release the whole pages wholly contained within a freed block back to
 * the system.
 *
 * NOTE(review): extraction dropped this function almost entirely -- the
 * return type, the "start" initialisation, and everything after the
 * early-out check are missing.  Only the end-of-block computation and the
 * "no whole page inside the block" test survive; confirm the remainder
 * against the complete source.
 */
free_pages(BLOCK_HDR* blk)
     end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

     /*
      * If the block doesn't contain a whole page then there is nothing to do
      */
     if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
/*
 * Coalesce a newly freed block with its address-adjacent neighbours on the
 * free list.
 *
 * Because add_to_free_list keeps the free list sorted by address, the list
 * successor/predecessor are also the address neighbours: if blk ends
 * exactly where the next block starts, the next block is absorbed into
 * blk; if the previous block ends exactly where blk starts, blk is
 * absorbed into the previous block.
 *
 * NOTE(review): extraction dropped the return type, the next/previous
 * declarations, braces, and the right hand side of both adjacency
 * comparisons (the "== (unsigned int)..." targets); fragments kept
 * verbatim.
 */
merge_free_block(BLOCK_HDR* blk)
     PLIST_ENTRY next_entry;
     PLIST_ENTRY previous_entry;

     /* Try to absorb the block that follows blk in memory. */
     next_entry = blk->ListEntry.Flink;
     if (next_entry != &FreeBlockListHead)
          next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
          if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
               RemoveEntryList(&next->ListEntry);
               /* blk swallows next's header and data. */
               blk->Size = blk->Size + sizeof(BLOCK_HDR) + next->Size;

     /* Try to let the block preceding blk in memory absorb blk. */
     previous_entry = blk->ListEntry.Blink;
     if (previous_entry != &FreeBlockListHead)
          previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
          if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
               RemoveEntryList(&blk->ListEntry);
               /* previous swallows blk's header and data. */
               previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
/*
 * FUNCTION: add the block to the free list (internal).
 *
 * The free list is kept sorted by ascending block address (which is what
 * lets merge_free_block coalesce neighbours): the block is inserted in
 * front of the first list entry with a higher address, or appended at the
 * tail if no such entry exists.
 *
 * NOTE(review): extraction dropped the return type, the "current"
 * declaration, braces, the early return after the in-place insertion, and
 * -- apparently -- an EiNrFreeBlocks update; fragments kept verbatim.
 */
add_to_free_list(BLOCK_HDR* blk)
     PLIST_ENTRY current_entry;

     current_entry = FreeBlockListHead.Flink;
     while (current_entry != &FreeBlockListHead)
          current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

          if ((unsigned int)current > (unsigned int)blk)
               /* Found the first block above blk: link blk in front of it
                * by hand (four-pointer splice). */
               blk->ListEntry.Flink = current_entry;
               blk->ListEntry.Blink = current_entry->Blink;
               current_entry->Blink->Flink = &blk->ListEntry;
               current_entry->Blink = &blk->ListEntry;
          current_entry = current_entry->Flink;
     /* Highest-addressed block so far: append at the tail. */
     InsertTailList(&FreeBlockListHead, &blk->ListEntry);
/*
 * FUNCTION: add the block to the used list (internal).
 *
 * The used list is unordered; blocks are simply pushed at the head.
 * NOTE(review): extraction dropped the braces and, apparently, an
 * EiNrUsedBlocks increment -- confirm against the complete source.
 */
static void add_to_used_list(BLOCK_HDR* blk)
     InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
/*
 * Unlink a block from the free list (internal).
 * NOTE(review): extraction dropped the braces and, apparently, an
 * EiNrFreeBlocks decrement -- confirm against the complete source.
 */
static void remove_from_free_list(BLOCK_HDR* current)
     RemoveEntryList(&current->ListEntry);
/*
 * Unlink a block from the used list (internal).
 * NOTE(review): extraction dropped the braces and, apparently, an
 * EiNrUsedBlocks decrement -- confirm against the complete source.
 */
static void remove_from_used_list(BLOCK_HDR* current)
     RemoveEntryList(&current->ListEntry);
662 inline static void* block_to_address(BLOCK_HDR
* blk
)
664 * FUNCTION: Translate a block header address to the corresponding block
668 return ( (void *) ((int)blk
+ sizeof(BLOCK_HDR
)) );
671 inline static BLOCK_HDR
* address_to_block(void* addr
)
674 ( ((int)addr
) - sizeof(BLOCK_HDR
) );
/*
 * FUNCTION: Grow the executive heap to accomodate a block of at least
 * 'size'.
 *
 * Reserves enough whole pages for size + one header from the nonpaged pool
 * region, maps fresh physical pages into them, then carves the region: if
 * the leftover beyond the requested block is big enough to be useful
 * (> two headers), the region is split into a used block of exactly 'size'
 * plus a free remainder; otherwise the whole region becomes one used
 * block.  The new used block is returned (via used_blk) to the caller,
 * ExAllocateNonPagedPoolWithTag.
 *
 * NOTE(review): extraction dropped the i/Status declarations, braces,
 * "else", the protection argument of MmCreateVirtualMapping, the failure
 * handling after the "Unable to create virtual mapping" print (the visible
 * code would fall through with an unmapped page), the "used_blk->Tag = Tag"
 * assignment and the final return -- fragments kept verbatim; confirm
 * against the complete source.
 */
static BLOCK_HDR* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
     /* Whole-page footprint of the request including its header. */
     unsigned int total_size = size + sizeof(BLOCK_HDR);
     unsigned int nr_pages = PAGE_ROUND_UP(total_size) / PAGESIZE;
     unsigned int start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);
     BLOCK_HDR* used_blk=NULL;
     BLOCK_HDR* free_blk=NULL;

     DPRINT("growing heap for block size %d, ",size);
     DPRINT("start %x\n",start);

     /* Back every reserved page with a freshly allocated physical page. */
     for (i=0;i<nr_pages;i++)
          Status = MmCreateVirtualMapping(NULL,
                                          (PVOID)(start + (i*PAGESIZE)),
                                          (ULONG)MmAllocPage(0));
          if (!NT_SUCCESS(Status))
               DbgPrint("Unable to create virtual mapping\n");

     /* Split only when the tail is big enough to be worth tracking. */
     if ((PAGESIZE-(total_size%PAGESIZE))>(2*sizeof(BLOCK_HDR)))
          /* Used block of exactly the requested size at the start... */
          used_blk = (struct _BLOCK_HDR *)start;
          DPRINT("Creating block at %x\n",start);
          used_blk->Magic = BLOCK_HDR_USED_MAGIC;
          used_blk->Size = size;
          add_to_used_list(used_blk);

          /* ...followed by a free block covering the remainder. */
          free_blk = (BLOCK_HDR *)(start + sizeof(BLOCK_HDR) + size);
          DPRINT("Creating block at %x\n",free_blk);
          free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
          free_blk->Size = (nr_pages * PAGESIZE) -((sizeof(BLOCK_HDR)*2) + size);
          add_to_free_list(free_blk);

          EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;
          EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;

          /* Tail too small to split: hand the whole region to the caller. */
          used_blk = (struct _BLOCK_HDR *)start;
          used_blk->Magic = BLOCK_HDR_USED_MAGIC;
          used_blk->Size = (nr_pages * PAGESIZE) - sizeof(BLOCK_HDR);
          add_to_used_list(used_blk);

          EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;

     /* Record who allocated the block and reset its dump flag. */
     used_blk->Caller = Caller;
     used_blk->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
     MiAddToTagHashTable(used_blk);
#endif /* TAG_STATISTICS_TRACKING */
/*
 * FUNCTION: Allocate a used block of least 'size' from the specified
 * free block.
 * RETURNS: The address of the created memory block (the usable area, not
 * the header -- see block_to_address).
 *
 * NOTE(review): extraction dropped the free_blk declaration, braces, the
 * "current->Size = size" shrink on the split path and the
 * "current->Tag = Tag" assignments -- fragments kept verbatim; confirm
 * against the complete source.
 */
static void* take_block(BLOCK_HDR* current, unsigned int size,
                        ULONG Tag, PVOID Caller)
     /*
      * If the block is much bigger than required then split it and
      * return a pointer to the allocated section. If the difference
      * between the sizes is marginal it makes no sense to have the
      * extra overhead.
      */
     if (current->Size > (1 + size + sizeof(BLOCK_HDR)))
          /* Remove the whole block from the free accounting; the remainder
           * is added back once free_blk is set up below. */
          EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;

          /*
           * Replace the bigger block with a smaller block in the
           * same position in the list.
           */
          free_blk = (BLOCK_HDR *)(((int)current)
                                   + sizeof(BLOCK_HDR) + size);
          free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
          /* The remainder inherits current's slot in the (address-ordered)
           * free list. */
          InsertHeadList(&current->ListEntry, &free_blk->ListEntry);
          free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);

          /* Move the allocated front part onto the used list. */
          RemoveEntryList(&current->ListEntry);
          InsertHeadList(&UsedBlockListHead, &current->ListEntry);

          current->Magic = BLOCK_HDR_USED_MAGIC;
          current->Caller = Caller;
          current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
          MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

          EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;
          EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;

          return(block_to_address(current));

     /*
      * Otherwise allocate the whole block.
      */
     remove_from_free_list(current);
     add_to_used_list(current);

     EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;
     EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;

     current->Magic = BLOCK_HDR_USED_MAGIC;
     current->Caller = Caller;
     current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
     MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

     return(block_to_address(current));
/*
 * FUNCTION: Releases previously allocated memory.
 * ARGUMENTS:
 *        block = block to free
 *
 * Recovers the header in front of the block, verifies the used-magic stamp
 * (diagnosing double frees and frees of non-pool addresses), poisons the
 * freed data with 0xcc, then moves the block from the used list to the
 * free list and coalesces it with its neighbours.
 *
 * NOTE(review): extraction dropped the oldIrql declaration, braces,
 * "else", the KeBugCheck calls after the diagnostic prints and the tail of
 * the non-allocated-address DbgPrint; fragments kept verbatim.
 *
 * NOTE(review): the EiUsed/EiFree counter updates happen AFTER
 * merge_free_block -- a forward merge has already grown blk->Size by then,
 * so the accounting can be inflated.  The POOL_TRACE line also reads
 * "blk->size" (lower case) unlike the "Size" member used everywhere else;
 * it only compiles when pool tracing is enabled.
 */
VOID STDCALL ExFreePool (PVOID block)
     BLOCK_HDR* blk=address_to_block(block);

     DPRINT("freeing block %x\n",blk);

     POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
                ((PULONG)&block)[-1]);

     KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

     /* Anything without the used magic is not a live allocation. */
     if (blk->Magic != BLOCK_HDR_USED_MAGIC)
          if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
               /* Double free: the header still carries the free stamp. */
               DbgPrint("ExFreePool of already freed address %x\n", block);
               DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",

     /* Poison the freed user area to catch use-after-free. */
     memset(block, 0xcc, blk->Size);

#ifdef TAG_STATISTICS_TRACKING
     MiRemoveFromTagHashTable(blk);
#endif /* TAG_STATISTICS_TRACKING */
     remove_from_used_list(blk);
     blk->Magic = BLOCK_HDR_FREE_MAGIC;
     add_to_free_list(blk);
     merge_free_block(blk);

     EiUsedNonPagedPool = EiUsedNonPagedPool - blk->Size;
     EiFreeNonPagedPool = EiFreeNonPagedPool + blk->Size;

     KeReleaseSpinLock(&MmNpoolLock, oldIrql);
872 ExAllocateNonPagedPoolWithTag(ULONG Type
, ULONG Size
, ULONG Tag
, PVOID Caller
)
874 BLOCK_HDR
* current
= NULL
;
875 PLIST_ENTRY current_entry
;
877 BLOCK_HDR
* best
= NULL
;
880 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
883 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
888 * accomodate this useful idiom
892 POOL_TRACE("= NULL\n");
893 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
898 * Look for an already created block of sufficent size
900 current_entry
= FreeBlockListHead
.Flink
;
901 while (current_entry
!= &FreeBlockListHead
)
903 DPRINT("current %x size %x tag_next %x\n",current
,current
->Size
,
905 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
906 if (current
->Size
>= Size
&&
907 (best
== NULL
|| current
->Size
< best
->Size
))
911 current_entry
= current_entry
->Flink
;
915 block
=take_block(best
, Size
, Tag
, Caller
);
917 memset(block
,0,Size
);
918 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
924 * Otherwise create a new block
926 block
=block_to_address(grow_kernel_pool(Size
, Tag
, Caller
));
928 memset(block
, 0, Size
);
929 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);