/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/pool.c
 * PURPOSE:     Implements the kernel memory pool
 * PROGRAMMER:  David Welch (welch@mcmail.com)
 * UPDATE HISTORY:
 *   10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
 *             in take_block (if current bigger than required)
 *             in remove_from_used_list
 */
/* INCLUDES ****************************************************************/

#include <internal/stddef.h>
#include <internal/mm.h>
#include <internal/hal/page.h>
#include <internal/pool.h>
#include <internal/bitops.h>
#include <internal/kernel.h>
#include <internal/debug.h>

#include <ddk/ntddk.h>
/* TYPES *******************************************************************/

/*
 * Fields present at the start of a block (this is for internal use only).
 * Every pool block, free or used, begins with one of these headers; the
 * memory handed to callers starts immediately after it.
 */
typedef struct _block_hdr
{
   unsigned int size;             /* usable bytes in the block, excluding this header */
   struct _block_hdr* previous;   /* previous block on the free or used list */
   struct _block_hdr* next;       /* next block on the free or used list */
} block_hdr;
41 /* GLOBALS *****************************************************************/
44 * Memory managment initalized symbol for the base of the pool
46 extern unsigned int kernel_pool_base
;
49 * Pointer to the first block in the free list
51 static block_hdr
* free_list_head
= NULL
;
52 static block_hdr
* used_list_head
= NULL
;
53 static unsigned int nr_free_blocks
= 0;
54 static unsigned int nr_used_blocks
= 0;
56 #define ALLOC_MAP_SIZE (NONPAGED_POOL_SIZE / PAGESIZE)
59 * One bit for each page in the kmalloc region
60 * If set then the page is used by a kmalloc block
62 static unsigned int alloc_map
[ALLOC_MAP_SIZE
/32]={0,};
64 /* FUNCTIONS ***************************************************************/
66 static void validate_free_list(void)
68 * FUNCTION: Validate the integrity of the list of free blocks
71 block_hdr
* current
=free_list_head
;
72 unsigned int blocks_seen
=0;
76 unsigned int base_addr
= (int)current
;
77 if (base_addr
< (kernel_pool_base
) ||
78 (base_addr
+current
->size
) >
79 (kernel_pool_base
)+NONPAGED_POOL_SIZE
)
81 printk("Block %x found outside pool area\n",current
);
82 printk("Size %d\n",current
->size
);
83 printk("Limits are %x %x\n",kernel_pool_base
,
84 kernel_pool_base
+NONPAGED_POOL_SIZE
);
85 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
88 if (blocks_seen
> nr_free_blocks
)
90 printk("Too many blocks on list\n");
91 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
93 // verify_for_write(base_addr,current->size);
94 if (current
->next
!=NULL
&¤t
->next
->previous
!=current
)
96 printk("%s:%d:Break in list (current %x next %x "
97 "current->next->previous %x)\n",
98 __FILE__
,__LINE__
,current
,current
->next
,
99 current
->next
->previous
);
100 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
102 current
=current
->next
;
106 static void validate_used_list(void)
108 * FUNCTION: Validate the integrity of the list of used blocks
111 block_hdr
* current
=used_list_head
;
112 unsigned int blocks_seen
=0;
114 while (current
!=NULL
)
116 unsigned int base_addr
= (int)current
;
117 if (base_addr
< (kernel_pool_base
) ||
118 (base_addr
+current
->size
) >
119 (kernel_pool_base
)+NONPAGED_POOL_SIZE
)
121 printk("Block %x found outside pool area\n",current
);
125 if (blocks_seen
> nr_used_blocks
)
127 printk("Too many blocks on list\n");
130 // verify_for_write(base_addr,current->size);
131 if (current
->next
!=NULL
&¤t
->next
->previous
!=current
)
133 printk("Break in list (current %x next %x)\n",
134 current
,current
->next
);
137 current
=current
->next
;
142 static void check_duplicates(block_hdr
* blk
)
144 * FUNCTION: Check a block has no duplicates
146 * blk = block to check
147 * NOTE: Bug checks if duplicates are found
150 unsigned int base
= (int)blk
;
151 unsigned int last
= ((int)blk
) + +sizeof(block_hdr
) + blk
->size
;
153 block_hdr
* current
=free_list_head
;
154 while (current
!=NULL
)
156 if ( (int)current
> base
&& (int)current
< last
)
158 printk("intersecting blocks on list\n");
161 if ( (int)current
< base
&&
162 ((int)current
+ current
->size
+ sizeof(block_hdr
))
165 printk("intersecting blocks on list\n");
168 current
=current
->next
;
170 current
=used_list_head
;
171 while (current
!=NULL
)
173 if ( (int)current
> base
&& (int)current
< last
)
175 printk("intersecting blocks on list\n");
178 if ( (int)current
< base
&&
179 ((int)current
+ current
->size
+ sizeof(block_hdr
))
182 printk("intersecting blocks on list\n");
185 current
=current
->next
;
190 static void validate_kernel_pool(void)
192 * FUNCTION: Checks the integrity of the kernel memory heap
195 block_hdr
* current
=NULL
;
197 validate_free_list();
198 validate_used_list();
200 current
=free_list_head
;
201 while (current
!=NULL
)
203 check_duplicates(current
);
204 current
=current
->next
;
206 current
=used_list_head
;
207 while (current
!=NULL
)
209 check_duplicates(current
);
210 current
=current
->next
;
214 static void add_to_free_list(block_hdr
* blk
)
216 * FUNCTION: add the block to the free list (internal)
219 blk
->next
=free_list_head
;
221 if (free_list_head
!=NULL
)
223 free_list_head
->previous
=blk
;
229 static void add_to_used_list(block_hdr
* blk
)
231 * FUNCTION: add the block to the used list (internal)
234 blk
->next
=used_list_head
;
236 if (used_list_head
!=NULL
)
238 used_list_head
->previous
=blk
;
245 static void remove_from_free_list(block_hdr
* current
)
247 if (current
->next
==NULL
&¤t
->previous
==NULL
)
253 if (current
->previous
!=NULL
)
255 current
->previous
->next
=current
->next
;
257 if (current
->next
!=NULL
)
259 DPRINT("current->previous %x\n",current
->previous
);
260 current
->next
->previous
=current
->previous
;
#ifdef BROKEN_VERSION_OF_REMOVE_FROM_FREE_LIST
/*
 * Disabled, known-broken variant kept for reference only: it dereferences
 * current->previous / current->next without first checking each for NULL
 * and never updates free_list_head when the head is removed.
 */
static void remove_from_free_list(block_hdr* current)
{
   if (current->next==NULL&&current->previous==NULL)
     {
	free_list_head=NULL;
     }
   else
     {
	if (current->next==NULL)
	  {
	     current->previous->next=NULL;
	  }
	else
	  {
	     current->previous->next=current->next;
	  }
	if (current->previous==NULL)
	  {
	     current->next->previous=NULL;
	  }
	else
	  {
	     current->next->previous=current->previous;
	  }
     }
   nr_free_blocks--;
}
#endif /* BROKEN_VERSION_OF_REMOVE_FROM_FREE_LIST */
296 static void remove_from_used_list(block_hdr
* current
)
298 if (current
->next
==NULL
&¤t
->previous
==NULL
)
304 if (current
->previous
==NULL
)
306 current
->next
->previous
=NULL
;
307 used_list_head
=current
->next
;
311 current
->previous
->next
=current
->next
;
313 if (current
->next
!=NULL
)
315 current
->next
->previous
=current
->previous
;
319 current
->previous
->next
=NULL
;
326 inline static void* block_to_address(block_hdr
* blk
)
328 * FUNCTION: Translate a block header address to the corresponding block
332 return ( (void *) ((int)blk
+ sizeof(block_hdr
)) );
335 inline static block_hdr
* address_to_block(void* addr
)
338 ( ((int)addr
) - sizeof(block_hdr
) );
341 static unsigned int alloc_pool_region(unsigned int nr_pages
)
343 * FUNCTION: Allocates a region of pages within the nonpaged pool area
346 unsigned int start
= 0;
347 unsigned int length
= 0;
350 DPRINT("alloc_pool_region(nr_pages = %d)\n",nr_pages
);
352 for (i
=1; i
<ALLOC_MAP_SIZE
;i
++)
354 if (!test_bit(i
%32,&alloc_map
[i
/32]))
365 if (length
==nr_pages
)
367 DPRINT("found region at %d for %d\n",start
,
369 for (j
=start
;j
<(start
+length
);j
++)
371 DPRINT("Writing %x\n",&alloc_map
[j
/32]);
372 set_bit(j
%32,&alloc_map
[j
/32]);
374 DPRINT("returning %x\n",(start
*PAGESIZE
)
376 return((start
*PAGESIZE
)+kernel_pool_base
);
385 printk("CRITICAL: Out of kmalloc space\n");
390 static block_hdr
* grow_kernel_pool(unsigned int size
)
392 * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
396 unsigned int total_size
= size
+ sizeof(block_hdr
);
397 unsigned int nr_pages
= PAGE_ROUND_UP(total_size
) / PAGESIZE
;
398 unsigned int start
= alloc_pool_region(nr_pages
);
399 block_hdr
* used_blk
=NULL
;
400 block_hdr
* free_blk
=NULL
;
403 DPRINT("growing heap for block size %d, ",size
);
404 DPRINT("start %x\n",start
);
406 for (i
=0;i
<nr_pages
;i
++)
408 set_page(start
+(i
*PAGESIZE
),PA_SYSTEM
| PA_WRITE
| PA_READ
,
413 if ((PAGESIZE
-(total_size
%PAGESIZE
))>(2*sizeof(block_hdr
)))
415 used_blk
= (struct _block_hdr
*)start
;
416 DPRINT("Creating block at %x\n",start
);
417 used_blk
->size
= size
;
418 add_to_used_list(used_blk
);
420 free_blk
= (block_hdr
*)(start
+ sizeof(block_hdr
) + size
);
421 DPRINT("Creating block at %x\n",free_blk
);
422 free_blk
->size
= (nr_pages
* PAGESIZE
) -((sizeof(block_hdr
)*2) + size
);
423 add_to_free_list(free_blk
);
427 used_blk
= (struct _block_hdr
*)start
;
428 used_blk
->size
= nr_pages
* PAGESIZE
;
429 add_to_used_list(used_blk
);
432 validate_kernel_pool();
436 static void* take_block(block_hdr
* current
, unsigned int size
)
438 * FUNCTION: Allocate a used block of least 'size' from the specified
440 * RETURNS: The address of the created memory block
444 * If the block is much bigger than required then split it and
445 * return a pointer to the allocated section. If the difference
446 * between the sizes is marginal it makes no sense to have the
449 if (current
->size
> (1 + size
+ sizeof(block_hdr
)))
452 * Replace the bigger block with a smaller block in the
453 * same position in the list
455 block_hdr
* free_blk
= (block_hdr
*)(((int)current
)
456 + sizeof(block_hdr
) + size
);
457 free_blk
->next
= current
->next
;
458 free_blk
->previous
= current
->previous
;
461 current
->next
->previous
= free_blk
;
463 if (current
->previous
)
465 current
->previous
->next
= free_blk
;
467 free_blk
->size
= current
->size
- (sizeof(block_hdr
) + size
);
468 if (current
==free_list_head
)
470 free_list_head
=free_blk
;
474 add_to_used_list(current
);
476 validate_kernel_pool();
477 return(block_to_address(current
));
481 * Otherwise allocate the whole block
483 remove_from_free_list(current
);
484 add_to_used_list(current
);
486 validate_kernel_pool();
487 return(block_to_address(current
));
490 asmlinkage VOID
ExFreePool(PVOID block
)
492 * FUNCTION: Releases previously allocated memory
494 * block = block to free
497 block_hdr
* blk
=address_to_block(block
);
498 DPRINT("(%s:%d) freeing block %x\n",__FILE__
,__LINE__
,blk
);
500 validate_kernel_pool();
502 * Please don't change the order
504 remove_from_used_list(blk
);
505 add_to_free_list(blk
);
507 validate_kernel_pool();
510 #define CACHE_ALIGNMENT (16)
512 PVOID
ExAllocatePool(ULONG type
, ULONG size
)
514 * FUNCTION: Allocates memory from the pool
516 * size = minimum size of the block to be allocated
517 * type = the type of memory to use for the block
519 * the address of the block if it succeeds
524 if (type
== NonPagedPoolCacheAligned
||
525 type
== NonPagedPoolCacheAlignedMustS
)
527 size
= size
+ CACHE_ALIGNMENT
;
533 case NonPagedPoolMustSucceed
:
534 case NonPagedPoolCacheAligned
:
535 case NonPagedPoolCacheAlignedMustS
:
536 Block
= ExAllocateNonPagedPool(type
,size
);
540 case PagedPoolCacheAligned
:
541 Block
= ExAllocatePagedPool(type
,size
);
548 if ((type
==NonPagedPoolMustSucceed
|| type
==NonPagedPoolCacheAlignedMustS
)
551 KeBugCheck(MUST_SUCCEED_POOL_EMPTY
);
553 if (type
== NonPagedPoolCacheAligned
||
554 type
== NonPagedPoolCacheAlignedMustS
)
556 Block
= Block
+ CACHE_ALIGNMENT
- (((int)Block
)%CACHE_ALIGNMENT
);
561 static PVOID
ExAllocatePagedPool(ULONG type
, ULONG size
)
566 static PVOID
ExAllocateNonPagedPool(ULONG type
, ULONG size
)
568 block_hdr
* current
=NULL
;
570 DPRINT("kmalloc(size %d)\n",size
);
571 validate_kernel_pool();
574 * accomodate this useful idiom
582 * Look for an already created block of sufficent size
584 current
=free_list_head
;
586 while (current
!=NULL
)
588 DPRINT("current %x size %x next %x\n",current
,current
->size
,
590 if (current
->size
>=size
)
592 DPRINT("found block %x of size %d\n",current
,size
);
593 return(take_block(current
,size
));
595 current
=current
->next
;
599 * Otherwise create a new block
601 return(block_to_address(grow_kernel_pool(size
)));
604 PVOID
ExAllocatePoolWithQuota(POOL_TYPE PoolType
, ULONG NumberOfBytes
)
607 PKTHREAD current
= KeGetCurrentThread();
609 Block
= ExAllocatePool(PoolType
,NumberOfBytes
);
613 case NonPagedPoolMustSucceed
:
614 case NonPagedPoolCacheAligned
:
615 case NonPagedPoolCacheAlignedMustS
:
616 // current->NPagedPoolQuota = current->NPagedPoolQuota - NumberOfBytes;
620 case PagedPoolCacheAligned
:
621 // current->PagedPoolQuota = current->PagedPoolQuota - NumberOfBytes;
627 PVOID
ExAllocatePoolWithQuotaTag(POOL_TYPE PoolType
, ULONG NumberOfBytes
,
631 Block
=ExAllocatePoolWithQuota(PoolType
,NumberOfBytes
+sizeof(ULONG
));
632 ((ULONG
*)Block
)[0]=Tag
;
636 PVOID
ExAllocatePoolWithTag(POOL_TYPE PoolType
, ULONG NumberOfBytes
,
639 * FUNCTION: Allocates pool memory and inserts a caller supplied tag before
640 * the block allocated
642 * PoolType = Type of memory to allocate
643 * NumberOfBytes = Number of bytes to allocate
645 * RETURNS: The address of the block allocated
649 Block
=ExAllocatePool(PoolType
,NumberOfBytes
+sizeof(ULONG
));
650 ((ULONG
*)Block
)[0]=Tag
;