1 /* $Id: npool.c,v 1.26 2000/03/01 22:52:28 ea Exp $
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/pool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
17 /* INCLUDES ****************************************************************/
19 #include <ddk/ntddk.h>
21 #include <internal/string.h>
22 #include <internal/stddef.h>
23 #include <internal/mm.h>
24 #include <internal/mmhal.h>
25 #include <internal/bitops.h>
26 #include <internal/ntoskrnl.h>
27 #include <internal/pool.h>
30 #include <internal/debug.h>
/*
 * Pool debugging switches. VALIDATE_POOL runs a full heap-integrity walk at
 * key points; POOL_TRACE logs every allocate/free. The original file guarded
 * each with a disabled #if so the expensive variant could be toggled; the
 * guard is restored here so POOL_TRACE is not defined twice (a macro
 * redefinition), and tracing defaults to off.
 */
#define VALIDATE_POOL validate_kernel_pool()

#if 0
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
#else
#define POOL_TRACE(args...)
#endif
46 /* TYPES *******************************************************************/
/* Sentinel stored in every block header; checked on free and during
 * validation to detect pool corruption. */
#define BLOCK_HDR_MAGIC (0xdeadbeef)

/*
 * Fields present at the start of a block (this is for internal use only).
 * Each allocation (used or free) is preceded in memory by one of these
 * headers; `size` is the usable payload size, excluding the header itself.
 * The rest of the file dereferences ->magic and ->size, so both fields
 * belong here even though the extraction lost them.
 */
typedef struct _block_hdr
{
   unsigned int magic;            /* BLOCK_HDR_MAGIC while the header is valid */
   unsigned int size;             /* usable bytes following this header */
   struct _block_hdr* previous;   /* doubly-linked free/used list links */
   struct _block_hdr* next;
} block_hdr;
62 /* GLOBALS *****************************************************************/
65 * Memory managment initalized symbol for the base of the pool
67 static unsigned int kernel_pool_base
= 0;
70 * Pointer to the first block in the free list
72 static block_hdr
* free_list_head
= NULL
;
73 static block_hdr
* used_list_head
= NULL
;
74 static ULONG nr_free_blocks
;
75 ULONG EiNrUsedBlocks
= 0;
77 #define ALLOC_MAP_SIZE (NONPAGED_POOL_SIZE / PAGESIZE)
80 * One bit for each page in the kmalloc region
81 * If set then the page is used by a kmalloc block
83 static unsigned int alloc_map
[ALLOC_MAP_SIZE
/32]={0,};
84 static KSPIN_LOCK AllocMapLock
;
86 static KSPIN_LOCK MmNpoolLock
;
88 unsigned int EiFreeNonPagedPool
= 0;
89 unsigned int EiUsedNonPagedPool
= 0;
91 /* FUNCTIONS ***************************************************************/
93 VOID
ExUnmapPage(PVOID Addr
)
96 ULONG i
= ((ULONG
)Addr
- kernel_pool_base
) / PAGESIZE
;
98 DPRINT("ExUnmapPage(Addr %x)\n",Addr
);
101 KeAcquireSpinLock(&AllocMapLock
, &oldIrql
);
102 MmSetPage(NULL
, (PVOID
)Addr
, 0, 0);
103 clear_bit(i
%32, &alloc_map
[i
/32]);
104 KeReleaseSpinLock(&AllocMapLock
, oldIrql
);
107 PVOID
ExAllocatePage(VOID
)
114 PhysPage
= (ULONG
)MmAllocPage();
115 DPRINT("Allocated page %x\n",PhysPage
);
121 KeAcquireSpinLock(&AllocMapLock
, &oldlvl
);
122 for (i
=1; i
<ALLOC_MAP_SIZE
;i
++)
124 if (!test_bit(i
%32,&alloc_map
[i
/32]))
127 set_bit(i
%32,&alloc_map
[i
/32]);
128 addr
= kernel_pool_base
+ (i
*PAGESIZE
);
129 MmSetPage(NULL
, (PVOID
)addr
, PAGE_READWRITE
, PhysPage
);
130 KeReleaseSpinLock(&AllocMapLock
, oldlvl
);
134 KeReleaseSpinLock(&AllocMapLock
, oldlvl
);
138 VOID
ExInitNonPagedPool(ULONG BaseAddress
)
140 kernel_pool_base
= BaseAddress
;
141 KeInitializeSpinLock(&AllocMapLock
);
142 KeInitializeSpinLock(&MmNpoolLock
);
146 static void validate_free_list(void)
148 * FUNCTION: Validate the integrity of the list of free blocks
151 block_hdr
* current
=free_list_head
;
152 unsigned int blocks_seen
=0;
154 while (current
!=NULL
)
156 unsigned int base_addr
= (int)current
;
158 if (current
->magic
!= BLOCK_HDR_MAGIC
)
160 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
162 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
165 if (base_addr
< (kernel_pool_base
) ||
166 (base_addr
+current
->size
) >
167 (kernel_pool_base
)+NONPAGED_POOL_SIZE
)
169 DbgPrint("Block %x found outside pool area\n",current
);
170 DbgPrint("Size %d\n",current
->size
);
171 DbgPrint("Limits are %x %x\n",kernel_pool_base
,
172 kernel_pool_base
+NONPAGED_POOL_SIZE
);
173 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
176 if (blocks_seen
> nr_free_blocks
)
178 DbgPrint("Too many blocks on list\n");
179 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
181 // verify_for_write(base_addr,current->size);
182 if (current
->next
!=NULL
&¤t
->next
->previous
!=current
)
184 DbgPrint("%s:%d:Break in list (current %x next %x "
185 "current->next->previous %x)\n",
186 __FILE__
,__LINE__
,current
,current
->next
,
187 current
->next
->previous
);
188 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
190 current
=current
->next
;
194 static void validate_used_list(void)
196 * FUNCTION: Validate the integrity of the list of used blocks
199 block_hdr
* current
=used_list_head
;
200 unsigned int blocks_seen
=0;
202 while (current
!=NULL
)
204 unsigned int base_addr
= (int)current
;
206 if (current
->magic
!= BLOCK_HDR_MAGIC
)
208 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
210 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
212 if (base_addr
< (kernel_pool_base
) ||
213 (base_addr
+current
->size
) >
214 (kernel_pool_base
)+NONPAGED_POOL_SIZE
)
216 DbgPrint("Block %x found outside pool area\n",current
);
220 if (blocks_seen
> EiNrUsedBlocks
)
222 DbgPrint("Too many blocks on list\n");
225 // verify_for_write(base_addr,current->size);
226 if (current
->next
!=NULL
&¤t
->next
->previous
!=current
)
228 DbgPrint("Break in list (current %x next %x)\n",
229 current
,current
->next
);
232 current
=current
->next
;
236 static void check_duplicates(block_hdr
* blk
)
238 * FUNCTION: Check a block has no duplicates
240 * blk = block to check
241 * NOTE: Bug checks if duplicates are found
244 unsigned int base
= (int)blk
;
245 unsigned int last
= ((int)blk
) + +sizeof(block_hdr
) + blk
->size
;
247 block_hdr
* current
=free_list_head
;
248 while (current
!=NULL
)
250 if (current
->magic
!= BLOCK_HDR_MAGIC
)
252 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
254 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT
);
257 if ( (int)current
> base
&& (int)current
< last
)
259 DbgPrint("intersecting blocks on list\n");
262 if ( (int)current
< base
&&
263 ((int)current
+ current
->size
+ sizeof(block_hdr
))
266 DbgPrint("intersecting blocks on list\n");
269 current
=current
->next
;
271 current
=used_list_head
;
272 while (current
!=NULL
)
274 if ( (int)current
> base
&& (int)current
< last
)
276 DbgPrint("intersecting blocks on list\n");
279 if ( (int)current
< base
&&
280 ((int)current
+ current
->size
+ sizeof(block_hdr
))
283 DbgPrint("intersecting blocks on list\n");
286 current
=current
->next
;
291 static void validate_kernel_pool(void)
293 * FUNCTION: Checks the integrity of the kernel memory heap
296 block_hdr
* current
=NULL
;
298 validate_free_list();
299 validate_used_list();
301 current
=free_list_head
;
302 while (current
!=NULL
)
304 check_duplicates(current
);
305 current
=current
->next
;
307 current
=used_list_head
;
308 while (current
!=NULL
)
310 check_duplicates(current
);
311 current
=current
->next
;
316 static void add_to_free_list(block_hdr
* blk
)
318 * FUNCTION: add the block to the free list (internal)
321 blk
->next
=free_list_head
;
323 if (free_list_head
!=NULL
)
325 free_list_head
->previous
=blk
;
331 static void add_to_used_list(block_hdr
* blk
)
333 * FUNCTION: add the block to the used list (internal)
336 blk
->next
=used_list_head
;
338 if (used_list_head
!=NULL
)
340 used_list_head
->previous
=blk
;
347 static void remove_from_free_list(block_hdr
* current
)
349 if (current
->next
==NULL
&¤t
->previous
==NULL
)
355 if (current
->next
==NULL
)
357 current
->previous
->next
=NULL
;
359 else if (current
->previous
==NULL
)
361 current
->next
->previous
=NULL
;
362 free_list_head
=current
->next
;
366 current
->next
->previous
=current
->previous
;
367 current
->previous
->next
=current
->next
;
374 static void remove_from_used_list(block_hdr
* current
)
376 if (current
->next
==NULL
&¤t
->previous
==NULL
)
382 if (current
->previous
==NULL
)
384 current
->next
->previous
=NULL
;
385 used_list_head
=current
->next
;
389 current
->previous
->next
=current
->next
;
391 if (current
->next
!=NULL
)
393 current
->next
->previous
=current
->previous
;
397 current
->previous
->next
=NULL
;
404 inline static void* block_to_address(block_hdr
* blk
)
406 * FUNCTION: Translate a block header address to the corresponding block
410 return ( (void *) ((int)blk
+ sizeof(block_hdr
)) );
413 inline static block_hdr
* address_to_block(void* addr
)
416 ( ((int)addr
) - sizeof(block_hdr
) );
419 static unsigned int alloc_pool_region(unsigned int nr_pages
)
421 * FUNCTION: Allocates a region of pages within the nonpaged pool area
424 unsigned int start
= 0;
425 unsigned int length
= 0;
428 OLD_DPRINT("alloc_pool_region(nr_pages = %d)\n",nr_pages
);
430 for (i
=1; i
<ALLOC_MAP_SIZE
;i
++)
432 if (!test_bit(i
%32,&alloc_map
[i
/32]))
443 if (length
==nr_pages
)
445 OLD_DPRINT("found region at %d for %d\n",start
,
447 for (j
=start
;j
<(start
+length
);j
++)
449 set_bit(j
%32,&alloc_map
[j
/32]);
451 OLD_DPRINT("returning %x\n",(start
*PAGESIZE
)
453 return((start
*PAGESIZE
)+kernel_pool_base
);
462 DbgPrint("CRITICAL: Out of non-paged pool space\n");
467 static block_hdr
* grow_kernel_pool(unsigned int size
)
469 * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
473 unsigned int total_size
= size
+ sizeof(block_hdr
);
474 unsigned int nr_pages
= PAGE_ROUND_UP(total_size
) / PAGESIZE
;
475 unsigned int start
= alloc_pool_region(nr_pages
);
476 block_hdr
* used_blk
=NULL
;
477 block_hdr
* free_blk
=NULL
;
480 OLD_DPRINT("growing heap for block size %d, ",size
);
481 OLD_DPRINT("start %x\n",start
);
483 for (i
=0;i
<nr_pages
;i
++)
486 (PVOID
)(start
+ (i
*PAGESIZE
)),
488 (ULONG
)MmAllocPage());
492 if ((PAGESIZE
-(total_size
%PAGESIZE
))>(2*sizeof(block_hdr
)))
494 used_blk
= (struct _block_hdr
*)start
;
495 OLD_DPRINT("Creating block at %x\n",start
);
496 used_blk
->magic
= BLOCK_HDR_MAGIC
;
497 used_blk
->size
= size
;
498 add_to_used_list(used_blk
);
500 free_blk
= (block_hdr
*)(start
+ sizeof(block_hdr
) + size
);
501 OLD_DPRINT("Creating block at %x\n",free_blk
);
502 free_blk
->magic
= BLOCK_HDR_MAGIC
;
503 free_blk
->size
= (nr_pages
* PAGESIZE
) -((sizeof(block_hdr
)*2) + size
);
504 add_to_free_list(free_blk
);
506 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ free_blk
->size
;
507 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ used_blk
->size
;
511 used_blk
= (struct _block_hdr
*)start
;
512 used_blk
->magic
= BLOCK_HDR_MAGIC
;
513 used_blk
->size
= nr_pages
* PAGESIZE
;
514 add_to_used_list(used_blk
);
516 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ used_blk
->size
;
523 static void* take_block(block_hdr
* current
, unsigned int size
)
525 * FUNCTION: Allocate a used block of least 'size' from the specified
527 * RETURNS: The address of the created memory block
531 * If the block is much bigger than required then split it and
532 * return a pointer to the allocated section. If the difference
533 * between the sizes is marginal it makes no sense to have the
536 if (current
->size
> (1 + size
+ sizeof(block_hdr
)))
540 EiFreeNonPagedPool
= EiFreeNonPagedPool
- current
->size
;
543 * Replace the bigger block with a smaller block in the
544 * same position in the list
546 free_blk
= (block_hdr
*)(((int)current
)
547 + sizeof(block_hdr
) + size
);
548 free_blk
->magic
= BLOCK_HDR_MAGIC
;
549 free_blk
->next
= current
->next
;
550 free_blk
->previous
= current
->previous
;
553 current
->next
->previous
= free_blk
;
555 if (current
->previous
)
557 current
->previous
->next
= free_blk
;
559 free_blk
->size
= current
->size
- (sizeof(block_hdr
) + size
);
560 if (current
==free_list_head
)
562 free_list_head
=free_blk
;
566 add_to_used_list(current
);
568 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ current
->size
;
569 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ free_blk
->size
;
572 return(block_to_address(current
));
576 * Otherwise allocate the whole block
578 remove_from_free_list(current
);
579 add_to_used_list(current
);
581 EiFreeNonPagedPool
= EiFreeNonPagedPool
- current
->size
;
582 EiUsedNonPagedPool
= EiUsedNonPagedPool
+ current
->size
;
585 return(block_to_address(current
));
588 asmlinkage VOID
ExFreePool(PVOID block
)
590 * FUNCTION: Releases previously allocated memory
592 * block = block to free
595 block_hdr
* blk
=address_to_block(block
);
598 OLD_DPRINT("(%s:%d) freeing block %x\n",__FILE__
,__LINE__
,blk
);
600 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block
,blk
->size
,
601 ((PULONG
)&block
)[-1]);
603 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
605 memset(block
, 0xcc, blk
->size
);
609 if (blk
->magic
!= BLOCK_HDR_MAGIC
)
611 DbgPrint("ExFreePool of non-allocated address %x\n",block
);
617 * Please don't change the order
619 remove_from_used_list(blk
);
620 add_to_free_list(blk
);
622 EiUsedNonPagedPool
= EiUsedNonPagedPool
- blk
->size
;
623 EiFreeNonPagedPool
= EiFreeNonPagedPool
+ blk
->size
;
627 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
630 PVOID STDCALL
ExAllocateNonPagedPoolWithTag(ULONG type
,
635 block_hdr
* current
= NULL
;
637 block_hdr
* best
= NULL
;
640 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
643 KeAcquireSpinLock(&MmNpoolLock
, &oldIrql
);
645 // DbgPrint("Blocks on free list %d\n",nr_free_blocks);
646 // DbgPrint("Blocks on used list %d\n",eiNrUsedblocks);
647 // OLD_DPRINT("ExAllocateNonPagedPool(type %d, size %d)\n",type,size);
651 * accomodate this useful idiom
655 POOL_TRACE("= NULL\n");
656 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
661 * Look for an already created block of sufficent size
663 current
=free_list_head
;
665 // defrag_free_list();
667 while (current
!=NULL
)
669 OLD_DPRINT("current %x size %x next %x\n",current
,current
->size
,
671 if (current
->size
>=size
&&
673 current
->size
< best
->size
))
677 current
=current
->next
;
681 OLD_DPRINT("found block %x of size %d\n",best
,size
);
682 block
=take_block(best
,size
);
684 memset(block
,0,size
);
685 POOL_TRACE("= %x\n",block
);
686 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);
692 * Otherwise create a new block
694 block
=block_to_address(grow_kernel_pool(size
));
696 memset(block
,0,size
);
697 POOL_TRACE("= %x\n",block
);
698 KeReleaseSpinLock(&MmNpoolLock
, oldIrql
);