1 /* $Id: npool.c,v 1.42 2001/03/14 23:19:14 dwelch Exp $
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
17 /* INCLUDES ****************************************************************/
19 #include <ddk/ntddk.h>
20 #include <internal/mm.h>
21 #include <internal/mmhal.h>
22 #include <internal/bitops.h>
23 #include <internal/ntoskrnl.h>
24 #include <internal/pool.h>
27 #include <internal/debug.h>
29 /* Enable strict checking of the nonpaged pool on every allocation */
30 /* #define ENABLE_VALIDATE_POOL */
32 /* Enable tracking of statistics about the tagged blocks in the pool */
33 #define TAG_STATISTICS_TRACKING
36 * Put each block in its own range of pages and position the block at the
37 * end of the range so any accesses beyond the end of block are to invalid
39 * FIXME: Not implemented yet.
41 /* #define WHOLE_PAGE_ALLOCATIONS */
#ifdef ENABLE_VALIDATE_POOL
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
/* Enable this branch to trace every pool operation to the debugger. */
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
#else
#define POOL_TRACE(args...)
#endif
55 /* TYPES *******************************************************************/
57 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
58 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
61 * fields present at the start of a block (this is for internal use only)
63 typedef struct _BLOCK_HDR
70 struct _BLOCK_HDR
* tag_next
;
74 /* GLOBALS *****************************************************************/
77 * Memory managment initalized symbol for the base of the pool
79 static unsigned int kernel_pool_base
= 0;
82 * Head of the list of free blocks
84 static LIST_ENTRY FreeBlockListHead
;
87 * Head of the list of in use block
89 static LIST_ENTRY UsedBlockListHead
;
92 * Count of free blocks
94 static ULONG EiNrFreeBlocks
= 0;
97 * Count of used blocks
99 static ULONG EiNrUsedBlocks
= 0;
102 * Lock that protects the non-paged pool data structures
104 static KSPIN_LOCK MmNpoolLock
;
107 * Total memory used for free nonpaged pool blocks
109 ULONG EiFreeNonPagedPool
= 0;
112 * Total memory used for nonpaged pool blocks
114 ULONG EiUsedNonPagedPool
= 0;
117 * Allocate a range of memory in the nonpaged pool
120 MiAllocNonPagedPoolRegion(unsigned int nr_pages
);
122 #ifdef TAG_STATISTICS_TRACKING
123 #define TAG_HASH_TABLE_SIZE (1024)
124 static BLOCK_HDR
* tag_hash_table
[TAG_HASH_TABLE_SIZE
];
125 #endif /* TAG_STATISTICS_TRACKING */
127 /* FUNCTIONS ***************************************************************/
129 #ifdef TAG_STATISTICS_TRACKING
131 MiRemoveFromTagHashTable(BLOCK_HDR
* block
)
133 * Remove a block from the tag hash table
145 hash
= block
->Tag
% TAG_HASH_TABLE_SIZE
;
148 current
= tag_hash_table
[hash
];
149 while (current
!= NULL
)
151 if (current
== block
)
153 if (previous
== NULL
)
155 tag_hash_table
[hash
] = block
->tag_next
;
159 previous
->tag_next
= block
->tag_next
;
164 current
= current
->tag_next
;
166 DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
167 block
->Tag
, block
->Caller
);
172 MiAddToTagHashTable(BLOCK_HDR
* block
)
174 * Add a block to the tag hash table
186 hash
= block
->Tag
% TAG_HASH_TABLE_SIZE
;
189 current
= tag_hash_table
[hash
];
190 while (current
!= NULL
)
192 if (current
->Tag
== block
->Tag
)
194 block
->tag_next
= current
->tag_next
;
195 current
->tag_next
= block
;
199 current
= current
->tag_next
;
201 block
->tag_next
= NULL
;
202 if (previous
== NULL
)
204 tag_hash_table
[hash
] = block
;
208 previous
->tag_next
= block
;
211 #endif /* TAG_STATISTICS_TRACKING */
214 ExInitNonPagedPool(ULONG BaseAddress
)
216 kernel_pool_base
= BaseAddress
;
217 KeInitializeSpinLock(&MmNpoolLock
);
218 MmInitKernelMap((PVOID
)BaseAddress
);
219 memset(tag_hash_table
, 0, sizeof(tag_hash_table
));
220 InitializeListHead(&FreeBlockListHead
);
221 InitializeListHead(&UsedBlockListHead
);
224 #ifdef TAG_STATISTICS_TRACKING
226 MiDumpTagStats(ULONG CurrentTag
, ULONG CurrentNrBlocks
, ULONG CurrentSize
)
230 c1
= (CurrentTag
>> 24) & 0xFF;
231 c2
= (CurrentTag
>> 16) & 0xFF;
232 c3
= (CurrentTag
>> 8) & 0xFF;
233 c4
= CurrentTag
& 0xFF;
235 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
237 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
238 CurrentTag
, c4
, c3
, c2
, c1
, CurrentNrBlocks
,
239 CurrentSize
, CurrentSize
/ CurrentNrBlocks
);
243 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
244 CurrentTag
, CurrentNrBlocks
, CurrentSize
,
245 CurrentSize
/ CurrentNrBlocks
);
248 #endif /* TAG_STATISTICS_TRACKING */
251 MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly
)
253 #ifdef TAG_STATISTICS_TRACKING
257 ULONG CurrentNrBlocks
;
262 DbgPrint("******* Dumping non paging pool stats ******\n");
265 for (i
= 0; i
< TAG_HASH_TABLE_SIZE
; i
++)
270 current
= tag_hash_table
[i
];
271 while (current
!= NULL
)
273 if (current
->Tag
!= CurrentTag
)
275 if (CurrentTag
!= 0 && CurrentNrBlocks
!= 0)
277 MiDumpTagStats(CurrentTag
, CurrentNrBlocks
, CurrentSize
);
279 CurrentTag
= current
->Tag
;
284 if (!NewOnly
|| !current
->Dumped
)
288 CurrentSize
= CurrentSize
+ current
->Size
;
289 TotalSize
= TotalSize
+ current
->Size
;
290 current
->Dumped
= TRUE
;
292 current
= current
->tag_next
;
294 if (CurrentTag
!= 0 && CurrentNrBlocks
!= 0)
296 MiDumpTagStats(CurrentTag
, CurrentNrBlocks
, CurrentSize
);
299 if (TotalBlocks
!= 0)
301 DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
302 TotalBlocks
, TotalSize
, TotalSize
/ TotalBlocks
);
306 DbgPrint("TotalBlocks %d TotalSize %d\n",
307 TotalBlocks
, TotalSize
);
309 DbgPrint("***************** Dump Complete ***************\n");
310 #endif /* TAG_STATISTICS_TRACKING */
314 MiDebugDumpNonPagedPool(BOOLEAN NewOnly
)
317 PLIST_ENTRY current_entry
;
320 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
322 DbgPrint("******* Dumping non paging pool contents ******\n");
323 current_entry
= UsedBlockListHead
.Flink
;
324 while (current_entry
!= &UsedBlockListHead
)
326 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
327 if (!NewOnly
|| !current
->Dumped
)
331 c1
= (current
->Tag
>> 24) & 0xFF;
332 c2
= (current
->Tag
>> 16) & 0xFF;
333 c3
= (current
->Tag
>> 8) & 0xFF;
334 c4
= current
->Tag
& 0xFF;
336 if (isprint(c1
) && isprint(c2
) && isprint(c3
) && isprint(c4
))
338 DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
339 current
->Size
, current
->Tag
, c4
, c3
, c2
, c1
,
344 DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
345 current
->Size
, current
->Tag
, current
->Caller
);
347 current
->Dumped
= TRUE
;
349 current_entry
= current_entry
->Flink
;
351 DbgPrint("***************** Dump Complete ***************\n");
352 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
355 #ifdef ENABLE_VALIDATE_POOL
356 static void validate_free_list(void)
358 * FUNCTION: Validate the integrity of the list of free blocks
362 PLIST_ENTRY current_entry
;
363 unsigned int blocks_seen
=0;
365 current_entry
= FreeBlockListHead
.Flink
;
366 while (current_entry
!= &FreeBlockListHead
)
368 unsigned int base_addr
;
370 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
371 base_addr
= (int)current
;
373 if (current
->Magic
!= BLOCK_HDR_FREE_MAGIC
)
375 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
377 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
380 if (base_addr
< (kernel_pool_base
) ||
381 (base_addr
+current
->Size
) > (kernel_pool_base
)+NONPAGED_POOL_SIZE
)
383 DbgPrint("Block %x found outside pool area\n",current
);
384 DbgPrint("Size %d\n",current
->Size
);
385 DbgPrint("Limits are %x %x\n",kernel_pool_base
,
386 kernel_pool_base
+NONPAGED_POOL_SIZE
);
387 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
390 if (blocks_seen
> EiNrFreeBlocks
)
392 DbgPrint("Too many blocks on free list\n");
393 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
395 if (current
->ListEntry
.Flink
!= &FreeBlockListHead
&&
396 current
->ListEntry
.Flink
->Blink
!= ¤t
->ListEntry
)
398 DbgPrint("%s:%d:Break in list (current %x next %x "
399 "current->next->previous %x)\n",
400 __FILE__
,__LINE__
,current
, current
->ListEntry
.Flink
,
401 current
->ListEntry
.Flink
->Blink
);
402 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
405 current_entry
= current_entry
->Flink
;
409 static void validate_used_list(void)
411 * FUNCTION: Validate the integrity of the list of used blocks
415 PLIST_ENTRY current_entry
;
416 unsigned int blocks_seen
=0;
418 current_entry
= UsedBlockListHead
.Flink
;
419 while (current_entry
!= &UsedBlockListHead
)
421 unsigned int base_addr
;
423 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
424 base_addr
= (int)current
;
426 if (current
->Magic
!= BLOCK_HDR_USED_MAGIC
)
428 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
430 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
432 if (base_addr
< (kernel_pool_base
) ||
433 (base_addr
+current
->size
) >
434 (kernel_pool_base
)+NONPAGED_POOL_SIZE
)
436 DbgPrint("Block %x found outside pool area\n",current
);
440 if (blocks_seen
> EiNrUsedBlocks
)
442 DbgPrint("Too many blocks on used list\n");
445 if (current
->ListEntry
.Flink
!= &UsedBlockListHead
&&
446 current
->ListEntry
.Flink
->Blink
!= ¤t
->ListEntry
)
448 DbgPrint("Break in list (current %x next %x)\n",
449 current
, current
->ListEntry
.Flink
);
453 current_entry
= current_entry
->Flink
;
457 static void check_duplicates(BLOCK_HDR
* blk
)
459 * FUNCTION: Check a block has no duplicates
461 * blk = block to check
462 * NOTE: Bug checks if duplicates are found
465 unsigned int base
= (int)blk
;
466 unsigned int last
= ((int)blk
) + +sizeof(BLOCK_HDR
) + blk
->size
;
468 PLIST_ENTRY current_entry
;
470 current_entry
= FreeBlockListHead
.Flink
;
471 while (current_entry
!= &FreeBlockListHead
)
473 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
475 if (current
->Magic
!= BLOCK_HDR_FREE_MAGIC
)
477 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
479 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
482 if ( (int)current
> base
&& (int)current
< last
)
484 DbgPrint("intersecting blocks on list\n");
487 if ( (int)current
< base
&&
488 ((int)current
+ current
->size
+ sizeof(BLOCK_HDR
))
491 DbgPrint("intersecting blocks on list\n");
495 current_entry
= current_entry
->Flink
;
498 current_entry
= UsedBlockListHead
.Flink
;
499 while (current_entry
!= &UsedBlockListHead
)
501 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
503 if ( (int)current
> base
&& (int)current
< last
)
505 DbgPrint("intersecting blocks on list\n");
508 if ( (int)current
< base
&&
509 ((int)current
+ current
->size
+ sizeof(BLOCK_HDR
))
512 DbgPrint("intersecting blocks on list\n");
516 current_entry
= current_entry
->Flink
;
521 static void validate_kernel_pool(void)
523 * FUNCTION: Checks the integrity of the kernel memory heap
527 PLIST_ENTRY current_entry
;
529 validate_free_list();
530 validate_used_list();
532 current_entry
= FreeBlockListHead
.Flink
;
533 while (current_entry
!= &FreeBlockListHead
)
535 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
536 check_duplicates(current
);
537 current_entry
= current_entry
->Flink
;
539 current_entry
= UsedBlockListHead
.Flink
;
540 while (current_entry
!= &UsedBlockListHead
)
542 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
543 check_duplicates(current
);
544 current_entry
= current_entry
->Flink
;
551 free_pages(BLOCK_HDR
* blk
)
558 end
= (ULONG
)blk
+ sizeof(BLOCK_HDR
) + blk
->Size
;
561 * If the block doesn't contain a whole page then there is nothing to do
563 if (PAGE_ROUND_UP(start
) >= PAGE_ROUND_DOWN(end
))
571 merge_free_block(BLOCK_HDR
* blk
)
573 PLIST_ENTRY next_entry
;
575 PLIST_ENTRY previous_entry
;
578 next_entry
= blk
->ListEntry
.Flink
;
579 if (next_entry
!= &FreeBlockListHead
)
581 next
= CONTAINING_RECORD(next_entry
, BLOCK_HDR
, ListEntry
);
582 if (((unsigned int)blk
+ sizeof(BLOCK_HDR
) + blk
->Size
) ==
585 RemoveEntryList(&next
->ListEntry
);
586 blk
->Size
= blk
->Size
+ sizeof(BLOCK_HDR
) + next
->Size
;
591 previous_entry
= blk
->ListEntry
.Blink
;
592 if (previous_entry
!= &FreeBlockListHead
)
594 previous
= CONTAINING_RECORD(previous_entry
, BLOCK_HDR
, ListEntry
);
595 if (((unsigned int)previous
+ sizeof(BLOCK_HDR
) + previous
->Size
) ==
598 RemoveEntryList(&blk
->ListEntry
);
599 previous
->Size
= previous
->Size
+ sizeof(BLOCK_HDR
) + blk
->Size
;
606 add_to_free_list(BLOCK_HDR
* blk
)
608 * FUNCTION: add the block to the free list (internal)
611 PLIST_ENTRY current_entry
;
614 current_entry
= FreeBlockListHead
.Flink
;
615 while (current_entry
!= &FreeBlockListHead
)
617 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
619 if ((unsigned int)current
> (unsigned int)blk
)
621 blk
->ListEntry
.Flink
= current_entry
;
622 blk
->ListEntry
.Blink
= current_entry
->Blink
;
623 current_entry
->Blink
->Flink
= &blk
->ListEntry
;
624 current_entry
->Blink
= &blk
->ListEntry
;
629 current_entry
= current_entry
->Flink
;
631 InsertTailList(&FreeBlockListHead
, &blk
->ListEntry
);
635 static void add_to_used_list(BLOCK_HDR
* blk
)
637 * FUNCTION: add the block to the used list (internal)
640 InsertHeadList(&UsedBlockListHead
, &blk
->ListEntry
);
645 static void remove_from_free_list(BLOCK_HDR
* current
)
647 RemoveEntryList(¤t
->ListEntry
);
652 static void remove_from_used_list(BLOCK_HDR
* current
)
654 RemoveEntryList(¤t
->ListEntry
);
659 inline static void* block_to_address(BLOCK_HDR
* blk
)
661 * FUNCTION: Translate a block header address to the corresponding block
665 return ( (void *) ((int)blk
+ sizeof(BLOCK_HDR
)) );
668 inline static BLOCK_HDR
* address_to_block(void* addr
)
671 ( ((int)addr
) - sizeof(BLOCK_HDR
) );
674 static BLOCK_HDR
* grow_kernel_pool(unsigned int size
, ULONG Tag
, PVOID Caller
)
676 * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
680 unsigned int total_size
= size
+ sizeof(BLOCK_HDR
);
681 unsigned int nr_pages
= PAGE_ROUND_UP(total_size
) / PAGESIZE
;
682 unsigned int start
= (ULONG
)MiAllocNonPagedPoolRegion(nr_pages
);
683 BLOCK_HDR
* used_blk
=NULL
;
684 BLOCK_HDR
* free_blk
=NULL
;
688 OLD_DPRINT("growing heap for block size %d, ",size
);
689 OLD_DPRINT("start %x\n",start
);
691 for (i
=0;i
<nr_pages
;i
++)
693 Status
= MmCreateVirtualMapping(NULL
,
694 (PVOID
)(start
+ (i
*PAGESIZE
)),
696 (ULONG
)MmAllocPage(0));
697 if (!NT_SUCCESS(Status
))
699 DbgPrint("Unable to create virtual mapping\n");
705 if ((PAGESIZE
-(total_size
%PAGESIZE
))>(2*sizeof(BLOCK_HDR
)))
707 used_blk
= (struct _BLOCK_HDR
*)start
;
708 OLD_DPRINT("Creating block at %x\n",start
);
709 used_blk
->Magic
= BLOCK_HDR_USED_MAGIC
;
710 used_blk
->Size
= size
;
711 add_to_used_list(used_blk
);
713 free_blk
= (BLOCK_HDR
*)(start
+ sizeof(BLOCK_HDR
) + size
);
714 OLD_DPRINT("Creating block at %x\n",free_blk
);
715 free_blk
->Magic
= BLOCK_HDR_FREE_MAGIC
;
716 free_blk
->Size
= (nr_pages
* PAGESIZE
) -((sizeof(BLOCK_HDR
)*2) + size
);
717 add_to_free_list(free_blk
);
719 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ free_blk
->Size
;
720 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ used_blk
->Size
;
724 used_blk
= (struct _BLOCK_HDR
*)start
;
725 used_blk
->Magic
= BLOCK_HDR_USED_MAGIC
;
726 used_blk
->Size
= (nr_pages
* PAGESIZE
) - sizeof(BLOCK_HDR
);
727 add_to_used_list(used_blk
);
729 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ used_blk
->Size
;
733 used_blk
->Caller
= Caller
;
734 used_blk
->Dumped
= FALSE
;
735 #ifdef TAG_STATISTICS_TRACKING
736 MiAddToTagHashTable(used_blk
);
737 #endif /* TAG_STATISTICS_TRACKING */
743 static void* take_block(BLOCK_HDR
* current
, unsigned int size
,
744 ULONG Tag
, PVOID Caller
)
746 * FUNCTION: Allocate a used block of least 'size' from the specified
748 * RETURNS: The address of the created memory block
752 * If the block is much bigger than required then split it and
753 * return a pointer to the allocated section. If the difference
754 * between the sizes is marginal it makes no sense to have the
757 if (current
->Size
> (1 + size
+ sizeof(BLOCK_HDR
)))
761 EiFreeNonPagedPool
= EiFreeNonPagedPool
- current
->Size
;
764 * Replace the bigger block with a smaller block in the
765 * same position in the list
767 free_blk
= (BLOCK_HDR
*)(((int)current
)
768 + sizeof(BLOCK_HDR
) + size
);
769 free_blk
->Magic
= BLOCK_HDR_FREE_MAGIC
;
770 InsertHeadList(¤t
->ListEntry
, &free_blk
->ListEntry
);
771 free_blk
->Size
= current
->Size
- (sizeof(BLOCK_HDR
) + size
);
774 RemoveEntryList(¤t
->ListEntry
);
775 InsertHeadList(&UsedBlockListHead
, ¤t
->ListEntry
);
777 current
->Magic
= BLOCK_HDR_USED_MAGIC
;
779 current
->Caller
= Caller
;
780 current
->Dumped
= FALSE
;
781 #ifdef TAG_STATISTICS_TRACKING
782 MiAddToTagHashTable(current
);
783 #endif /* TAG_STATISTICS_TRACKING */
785 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ current
->Size
;
786 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ free_blk
->Size
;
789 return(block_to_address(current
));
793 * Otherwise allocate the whole block
795 remove_from_free_list(current
);
796 add_to_used_list(current
);
798 EiFreeNonPagedPool
= EiFreeNonPagedPool
- current
->Size
;
799 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ current
->Size
;
801 current
->Magic
= BLOCK_HDR_USED_MAGIC
;
803 current
->Caller
= Caller
;
804 current
->Dumped
= FALSE
;
805 #ifdef TAG_STATISTICS_TRACKING
806 MiAddToTagHashTable(current
);
807 #endif /* TAG_STATISTICS_TRACKING */
810 return(block_to_address(current
));
813 VOID STDCALL
ExFreePool (PVOID block
)
815 * FUNCTION: Releases previously allocated memory
817 * block = block to free
820 BLOCK_HDR
* blk
=address_to_block(block
);
823 OLD_DPRINT("(%s:%d) freeing block %x\n",__FILE__
,__LINE__
,blk
);
825 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block
,blk
->size
,
826 ((PULONG
)&block
)[-1]);
828 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
832 if (blk
->Magic
!= BLOCK_HDR_USED_MAGIC
)
834 if (blk
->Magic
== BLOCK_HDR_FREE_MAGIC
)
836 DbgPrint("ExFreePool of already freed address %x\n", block
);
840 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
847 memset(block
, 0xcc, blk
->Size
);
849 #ifdef TAG_STATISTICS_TRACKING
850 MiRemoveFromTagHashTable(blk
);
851 #endif /* TAG_STATISTICS_TRACKING */
852 remove_from_used_list(blk
);
853 blk
->Magic
= BLOCK_HDR_FREE_MAGIC
;
854 add_to_free_list(blk
);
855 merge_free_block(blk
);
857 EiUsedNonPagedPool
= EiUsedNonPagedPool
- blk
->Size
;
858 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ blk
->Size
;
862 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
866 ExAllocateNonPagedPoolWithTag(ULONG Type
, ULONG Size
, ULONG Tag
, PVOID Caller
)
868 BLOCK_HDR
* current
= NULL
;
869 PLIST_ENTRY current_entry
;
871 BLOCK_HDR
* best
= NULL
;
874 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
877 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
882 * accomodate this useful idiom
886 POOL_TRACE("= NULL\n");
887 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
892 * Look for an already created block of sufficent size
894 current_entry
= FreeBlockListHead
.Flink
;
895 while (current_entry
!= &FreeBlockListHead
)
897 OLD_DPRINT("current %x size %x next %x\n",current
,current
->size
,
899 current
= CONTAINING_RECORD(current_entry
, BLOCK_HDR
, ListEntry
);
900 if (current
->Size
>= Size
&&
901 (best
== NULL
|| current
->Size
< best
->Size
))
905 current_entry
= current_entry
->Flink
;
909 block
=take_block(best
, Size
, Tag
, Caller
);
911 memset(block
,0,Size
);
912 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
918 * Otherwise create a new block
920 block
=block_to_address(grow_kernel_pool(Size
, Tag
, Caller
));
922 memset(block
, 0, Size
);
923 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);