/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/pool.c
 * PURPOSE:     Implements the kernel memory pool
 * PROGRAMMER:  David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *   10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
 *             in take_block (if current bigger than required)
 *             in remove_from_used_list
 *   23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
 */
16 /* INCLUDES ****************************************************************/
18 #include <ddk/ntddk.h>
20 #include <internal/string.h>
21 #include <internal/stddef.h>
22 #include <internal/mm.h>
23 #include <internal/mmhal.h>
24 #include <internal/bitops.h>
25 #include <internal/ntoskrnl.h>
28 #include <internal/debug.h>
33 #define VALIDATE_POOL validate_kernel_pool()
39 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
41 #define POOL_TRACE(args...)
44 /* TYPES *******************************************************************/
46 #define BLOCK_HDR_MAGIC (0xdeadbeef)
49 * fields present at the start of a block (this is for internal use only)
/*
 * Header present at the start of every pool block (internal use only).
 * The fields magic and size are read by every routine in this file;
 * previous/next link the block into either the free list or the used list.
 */
typedef struct _block_hdr
{
   unsigned int magic;          /* BLOCK_HDR_MAGIC while the header is valid */
   unsigned int size;           /* usable bytes in the block, excluding this header */
   struct _block_hdr* previous; /* previous block on the free or used list */
   struct _block_hdr* next;     /* next block on the free or used list */
} block_hdr;
60 /* GLOBALS *****************************************************************/
63 * Memory managment initalized symbol for the base of the pool
65 static unsigned int kernel_pool_base
= 0;
68 * Pointer to the first block in the free list
70 static block_hdr
* free_list_head
= NULL
;
71 static block_hdr
* used_list_head
= NULL
;
72 static ULONG nr_free_blocks
;
73 ULONG EiNrUsedBlocks
= 0;
75 #define ALLOC_MAP_SIZE (NONPAGED_POOL_SIZE / PAGESIZE)
78 * One bit for each page in the kmalloc region
79 * If set then the page is used by a kmalloc block
81 static unsigned int alloc_map
[ALLOC_MAP_SIZE
/32]={0,};
82 static KSPIN_LOCK AllocMapLock
;
84 unsigned int EiFreeNonPagedPool
= 0;
85 unsigned int EiUsedNonPagedPool
= 0;
87 /* FUNCTIONS ***************************************************************/
89 VOID
ExUnmapPage(PVOID Addr
)
92 ULONG i
= ((ULONG
)Addr
- kernel_pool_base
) / PAGESIZE
;
94 DPRINT("ExUnmapPage(Addr %x)\n",Addr
);
97 KeAcquireSpinLock(&AllocMapLock
, &oldIrql
);
98 MmSetPage(NULL
, (PVOID
)Addr
, 0, 0);
99 clear_bit(i
%32, &alloc_map
[i
/32]);
100 KeReleaseSpinLock(&AllocMapLock
, oldIrql
);
103 PVOID
ExAllocatePage(VOID
)
110 PhysPage
= (ULONG
)MmAllocPage();
111 DPRINT("Allocated page %x\n",PhysPage
);
117 KeAcquireSpinLock(&AllocMapLock
, &oldlvl
);
118 for (i
=1; i
<ALLOC_MAP_SIZE
;i
++)
120 if (!test_bit(i
%32,&alloc_map
[i
/32]))
123 set_bit(i
%32,&alloc_map
[i
/32]);
124 addr
= kernel_pool_base
+ (i
*PAGESIZE
);
125 MmSetPage(NULL
, (PVOID
)addr
, PAGE_READWRITE
, PhysPage
);
126 KeReleaseSpinLock(&AllocMapLock
, oldlvl
);
130 KeReleaseSpinLock(&AllocMapLock
, oldlvl
);
134 VOID
ExInitNonPagedPool(ULONG BaseAddress
)
136 kernel_pool_base
= BaseAddress
;
137 KeInitializeSpinLock(&AllocMapLock
);
141 static void validate_free_list(void)
143 * FUNCTION: Validate the integrity of the list of free blocks
146 block_hdr
* current
=free_list_head
;
147 unsigned int blocks_seen
=0;
149 while (current
!=NULL
)
151 unsigned int base_addr
= (int)current
;
153 if (current
->magic
!= BLOCK_HDR_MAGIC
)
155 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
157 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
160 if (base_addr
< (kernel_pool_base
) ||
161 (base_addr
+current
->size
) >
162 (kernel_pool_base
)+NONPAGED_POOL_SIZE
)
164 printk("Block %x found outside pool area\n",current
);
165 printk("Size %d\n",current
->size
);
166 printk("Limits are %x %x\n",kernel_pool_base
,
167 kernel_pool_base
+NONPAGED_POOL_SIZE
);
168 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
171 if (blocks_seen
> nr_free_blocks
)
173 printk("Too many blocks on list\n");
174 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
176 // verify_for_write(base_addr,current->size);
177 if (current
->next
!=NULL
&¤t
->next
->previous
!=current
)
179 printk("%s:%d:Break in list (current %x next %x "
180 "current->next->previous %x)\n",
181 __FILE__
,__LINE__
,current
,current
->next
,
182 current
->next
->previous
);
183 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
185 current
=current
->next
;
189 static void validate_used_list(void)
191 * FUNCTION: Validate the integrity of the list of used blocks
194 block_hdr
* current
=used_list_head
;
195 unsigned int blocks_seen
=0;
197 while (current
!=NULL
)
199 unsigned int base_addr
= (int)current
;
201 if (current
->magic
!= BLOCK_HDR_MAGIC
)
203 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
205 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
207 if (base_addr
< (kernel_pool_base
) ||
208 (base_addr
+current
->size
) >
209 (kernel_pool_base
)+NONPAGED_POOL_SIZE
)
211 printk("Block %x found outside pool area\n",current
);
215 if (blocks_seen
> EiNrUsedBlocks
)
217 printk("Too many blocks on list\n");
220 // verify_for_write(base_addr,current->size);
221 if (current
->next
!=NULL
&¤t
->next
->previous
!=current
)
223 printk("Break in list (current %x next %x)\n",
224 current
,current
->next
);
227 current
=current
->next
;
231 static void check_duplicates(block_hdr
* blk
)
233 * FUNCTION: Check a block has no duplicates
235 * blk = block to check
236 * NOTE: Bug checks if duplicates are found
239 unsigned int base
= (int)blk
;
240 unsigned int last
= ((int)blk
) + +sizeof(block_hdr
) + blk
->size
;
242 block_hdr
* current
=free_list_head
;
243 while (current
!=NULL
)
245 if (current
->magic
!= BLOCK_HDR_MAGIC
)
247 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
249 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
252 if ( (int)current
> base
&& (int)current
< last
)
254 printk("intersecting blocks on list\n");
257 if ( (int)current
< base
&&
258 ((int)current
+ current
->size
+ sizeof(block_hdr
))
261 printk("intersecting blocks on list\n");
264 current
=current
->next
;
266 current
=used_list_head
;
267 while (current
!=NULL
)
269 if ( (int)current
> base
&& (int)current
< last
)
271 printk("intersecting blocks on list\n");
274 if ( (int)current
< base
&&
275 ((int)current
+ current
->size
+ sizeof(block_hdr
))
278 printk("intersecting blocks on list\n");
281 current
=current
->next
;
286 static void validate_kernel_pool(void)
288 * FUNCTION: Checks the integrity of the kernel memory heap
291 block_hdr
* current
=NULL
;
293 validate_free_list();
294 validate_used_list();
296 current
=free_list_head
;
297 while (current
!=NULL
)
299 check_duplicates(current
);
300 current
=current
->next
;
302 current
=used_list_head
;
303 while (current
!=NULL
)
305 check_duplicates(current
);
306 current
=current
->next
;
311 static void add_to_free_list(block_hdr
* blk
)
313 * FUNCTION: add the block to the free list (internal)
316 blk
->next
=free_list_head
;
318 if (free_list_head
!=NULL
)
320 free_list_head
->previous
=blk
;
326 static void add_to_used_list(block_hdr
* blk
)
328 * FUNCTION: add the block to the used list (internal)
331 blk
->next
=used_list_head
;
333 if (used_list_head
!=NULL
)
335 used_list_head
->previous
=blk
;
342 static void remove_from_free_list(block_hdr
* current
)
344 if (current
->next
==NULL
&¤t
->previous
==NULL
)
350 if (current
->next
==NULL
)
352 current
->previous
->next
=NULL
;
354 else if (current
->previous
==NULL
)
356 current
->next
->previous
=NULL
;
357 free_list_head
=current
->next
;
361 current
->next
->previous
=current
->previous
;
362 current
->previous
->next
=current
->next
;
369 static void remove_from_used_list(block_hdr
* current
)
371 if (current
->next
==NULL
&¤t
->previous
==NULL
)
377 if (current
->previous
==NULL
)
379 current
->next
->previous
=NULL
;
380 used_list_head
=current
->next
;
384 current
->previous
->next
=current
->next
;
386 if (current
->next
!=NULL
)
388 current
->next
->previous
=current
->previous
;
392 current
->previous
->next
=NULL
;
399 inline static void* block_to_address(block_hdr
* blk
)
401 * FUNCTION: Translate a block header address to the corresponding block
405 return ( (void *) ((int)blk
+ sizeof(block_hdr
)) );
408 inline static block_hdr
* address_to_block(void* addr
)
411 ( ((int)addr
) - sizeof(block_hdr
) );
414 static unsigned int alloc_pool_region(unsigned int nr_pages
)
416 * FUNCTION: Allocates a region of pages within the nonpaged pool area
419 unsigned int start
= 0;
420 unsigned int length
= 0;
423 OLD_DPRINT("alloc_pool_region(nr_pages = %d)\n",nr_pages
);
425 for (i
=1; i
<ALLOC_MAP_SIZE
;i
++)
427 if (!test_bit(i
%32,&alloc_map
[i
/32]))
438 if (length
==nr_pages
)
440 OLD_DPRINT("found region at %d for %d\n",start
,
442 for (j
=start
;j
<(start
+length
);j
++)
444 set_bit(j
%32,&alloc_map
[j
/32]);
446 OLD_DPRINT("returning %x\n",(start
*PAGESIZE
)
448 return((start
*PAGESIZE
)+kernel_pool_base
);
457 printk("CRITICAL: Out of non-paged pool space\n");
462 static block_hdr
* grow_kernel_pool(unsigned int size
)
464 * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
468 unsigned int total_size
= size
+ sizeof(block_hdr
);
469 unsigned int nr_pages
= PAGE_ROUND_UP(total_size
) / PAGESIZE
;
470 unsigned int start
= alloc_pool_region(nr_pages
);
471 block_hdr
* used_blk
=NULL
;
472 block_hdr
* free_blk
=NULL
;
475 OLD_DPRINT("growing heap for block size %d, ",size
);
476 OLD_DPRINT("start %x\n",start
);
478 for (i
=0;i
<nr_pages
;i
++)
481 (PVOID
)(start
+ (i
*PAGESIZE
)),
483 (ULONG
)MmAllocPage());
487 if ((PAGESIZE
-(total_size
%PAGESIZE
))>(2*sizeof(block_hdr
)))
489 used_blk
= (struct _block_hdr
*)start
;
490 OLD_DPRINT("Creating block at %x\n",start
);
491 used_blk
->magic
= BLOCK_HDR_MAGIC
;
492 used_blk
->size
= size
;
493 add_to_used_list(used_blk
);
495 free_blk
= (block_hdr
*)(start
+ sizeof(block_hdr
) + size
);
496 OLD_DPRINT("Creating block at %x\n",free_blk
);
497 free_blk
->magic
= BLOCK_HDR_MAGIC
;
498 free_blk
->size
= (nr_pages
* PAGESIZE
) -((sizeof(block_hdr
)*2) + size
);
499 add_to_free_list(free_blk
);
501 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ free_blk
->size
;
502 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ used_blk
->size
;
506 used_blk
= (struct _block_hdr
*)start
;
507 used_blk
->magic
= BLOCK_HDR_MAGIC
;
508 used_blk
->size
= nr_pages
* PAGESIZE
;
509 add_to_used_list(used_blk
);
511 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ used_blk
->size
;
518 static void* take_block(block_hdr
* current
, unsigned int size
)
520 * FUNCTION: Allocate a used block of least 'size' from the specified
522 * RETURNS: The address of the created memory block
526 * If the block is much bigger than required then split it and
527 * return a pointer to the allocated section. If the difference
528 * between the sizes is marginal it makes no sense to have the
531 if (current
->size
> (1 + size
+ sizeof(block_hdr
)))
535 EiFreeNonPagedPool
= EiFreeNonPagedPool
- current
->size
;
538 * Replace the bigger block with a smaller block in the
539 * same position in the list
541 free_blk
= (block_hdr
*)(((int)current
)
542 + sizeof(block_hdr
) + size
);
543 free_blk
->magic
= BLOCK_HDR_MAGIC
;
544 free_blk
->next
= current
->next
;
545 free_blk
->previous
= current
->previous
;
548 current
->next
->previous
= free_blk
;
550 if (current
->previous
)
552 current
->previous
->next
= free_blk
;
554 free_blk
->size
= current
->size
- (sizeof(block_hdr
) + size
);
555 if (current
==free_list_head
)
557 free_list_head
=free_blk
;
561 add_to_used_list(current
);
563 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ current
->size
;
564 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ free_blk
->size
;
567 return(block_to_address(current
));
571 * Otherwise allocate the whole block
573 remove_from_free_list(current
);
574 add_to_used_list(current
);
576 EiFreeNonPagedPool
= EiFreeNonPagedPool
- current
->size
;
577 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ current
->size
;
580 return(block_to_address(current
));
583 asmlinkage VOID
ExFreePool(PVOID block
)
585 * FUNCTION: Releases previously allocated memory
587 * block = block to free
590 block_hdr
* blk
=address_to_block(block
);
591 OLD_DPRINT("(%s:%d) freeing block %x\n",__FILE__
,__LINE__
,blk
);
593 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block
,blk
->size
,
594 ((PULONG
)&block
)[-1]);
598 if (blk
->magic
!= BLOCK_HDR_MAGIC
)
600 DbgPrint("ExFreePool of non-allocated address %x\n",block
);
606 * Please don't change the order
608 remove_from_used_list(blk
);
609 add_to_free_list(blk
);
611 EiUsedNonPagedPool
= EiUsedNonPagedPool
- blk
->size
;
612 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ blk
->size
;
617 PVOID
ExAllocateNonPagedPoolWithTag(ULONG type
,
622 block_hdr
* current
= NULL
;
624 block_hdr
* best
= NULL
;
626 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
629 // DbgPrint("Blocks on free list %d\n",nr_free_blocks);
630 // DbgPrint("Blocks on used list %d\n",eiNrUsedblocks);
631 // OLD_DPRINT("ExAllocateNonPagedPool(type %d, size %d)\n",type,size);
635 * accomodate this useful idiom
639 POOL_TRACE("= NULL\n");
644 * Look for an already created block of sufficent size
646 current
=free_list_head
;
648 // defrag_free_list();
650 while (current
!=NULL
)
652 OLD_DPRINT("current %x size %x next %x\n",current
,current
->size
,
654 if (current
->size
>=size
&&
656 current
->size
< best
->size
))
660 current
=current
->next
;
664 OLD_DPRINT("found block %x of size %d\n",best
,size
);
665 block
=take_block(best
,size
);
667 memset(block
,0,size
);
668 POOL_TRACE("= %x\n",block
);
674 * Otherwise create a new block
676 block
=block_to_address(grow_kernel_pool(size
));
678 memset(block
,0,size
);
679 POOL_TRACE("= %x\n",block
);