/* $Id: npool.c,v 1.26 2000/03/01 22:52:28 ea Exp $
 *
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/npool.c
 * PURPOSE:     Implements the kernel memory pool
 * PROGRAMMER:  David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *              27/05/98: Created
 *              10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
 *                        in take_block (if current bigger than required)
 *                        in remove_from_used_list
 *                        in ExFreePool
 *              23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
 */

/* INCLUDES ****************************************************************/

#include <ddk/ntddk.h>
#include <string.h>
#include <internal/string.h>
#include <internal/stddef.h>
#include <internal/mm.h>
#include <internal/mmhal.h>
#include <internal/bitops.h>
#include <internal/ntoskrnl.h>
#include <internal/pool.h>

#define NDEBUG
#include <internal/debug.h>


#if 0
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
/*
 * Note: no trailing semicolon on the expansion, so the macro behaves as a
 * single statement inside if/else bodies
 */
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0)
#else
#define POOL_TRACE(args...)
#endif

/* TYPES *******************************************************************/

#define BLOCK_HDR_MAGIC (0xdeadbeef)

/*
 * Fields present at the start of a block (this is for internal use only)
 */
typedef struct _block_hdr
{
   ULONG magic;
   ULONG size;
   struct _block_hdr* previous;
   struct _block_hdr* next;
   ULONG tag;
} block_hdr;
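
/*
 * Editor's sketch (not from the original source): every allocation carries
 * a block_hdr immediately in front of the bytes handed to the caller, so
 * the pool can get back to its bookkeeping from the user pointer with plain
 * address arithmetic:
 *
 *    [ block_hdr | payload of blk->size bytes ]
 *    ^            ^
 *    blk          block_to_address(blk) == (char*)blk + sizeof(block_hdr)
 *
 * magic is checked against BLOCK_HDR_MAGIC to catch corruption and double
 * frees; tag is reserved for pool tagging and is not yet filled in.
 */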

/* GLOBALS *****************************************************************/

/*
 * Base address of the nonpaged pool region, initialized by the memory
 * manager (see ExInitNonPagedPool)
 */
static unsigned int kernel_pool_base = 0;

/*
 * Pointers to the first blocks in the free and used lists
 */
static block_hdr* free_list_head = NULL;
static block_hdr* used_list_head = NULL;
static ULONG nr_free_blocks;
ULONG EiNrUsedBlocks = 0;

#define ALLOC_MAP_SIZE (NONPAGED_POOL_SIZE / PAGESIZE)

/*
 * One bit for each page in the nonpaged pool region
 * If set then the page is in use by a pool block
 */
static unsigned int alloc_map[ALLOC_MAP_SIZE/32]={0,};
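
/*
 * Editor's note (illustrative, not from the original source): page i of the
 * pool maps to bit (i % 32) of word alloc_map[i / 32], and its virtual
 * address is kernel_pool_base + i * PAGESIZE.  For example, page 37 lives
 * in word 1, bit 5.  ExAllocatePage, ExUnmapPage and alloc_pool_region all
 * rely on exactly this arithmetic.
 */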
static KSPIN_LOCK AllocMapLock;

static KSPIN_LOCK MmNpoolLock;

unsigned int EiFreeNonPagedPool = 0;
unsigned int EiUsedNonPagedPool = 0;

/* FUNCTIONS ***************************************************************/

VOID ExUnmapPage(PVOID Addr)
/*
 * FUNCTION: Unmaps a page of the nonpaged pool area and marks it free in
 * the allocation bitmap
 * ARGUMENTS:
 *          Addr = address of the page to unmap
 */
{
   KIRQL oldIrql;
   ULONG i = ((ULONG)Addr - kernel_pool_base) / PAGESIZE;
   
   DPRINT("ExUnmapPage(Addr %x)\n",Addr);
   DPRINT("i %x\n",i);
   
   KeAcquireSpinLock(&AllocMapLock, &oldIrql);
   MmSetPage(NULL, (PVOID)Addr, 0, 0);
   clear_bit(i%32, &alloc_map[i/32]);
   KeReleaseSpinLock(&AllocMapLock, oldIrql);
}

PVOID ExAllocatePage(VOID)
/*
 * FUNCTION: Allocates a physical page, maps it at the first free page of
 * the nonpaged pool area and marks it used in the allocation bitmap
 * RETURNS: The address of the page, or NULL on failure
 */
{
   KIRQL oldlvl;
   ULONG addr;
   ULONG i;
   ULONG PhysPage;
   
   PhysPage = (ULONG)MmAllocPage();
   DPRINT("Allocated page %x\n",PhysPage);
   if (PhysPage == 0)
     {
        return(NULL);
     }
   
   KeAcquireSpinLock(&AllocMapLock, &oldlvl);
   for (i=1; i<ALLOC_MAP_SIZE;i++)
     {
        if (!test_bit(i%32,&alloc_map[i/32]))
          {
             DPRINT("i %x\n",i);
             set_bit(i%32,&alloc_map[i/32]);
             addr = kernel_pool_base + (i*PAGESIZE);
             MmSetPage(NULL, (PVOID)addr, PAGE_READWRITE, PhysPage);
             KeReleaseSpinLock(&AllocMapLock, oldlvl);
             return((PVOID)addr);
          }
     }
   KeReleaseSpinLock(&AllocMapLock, oldlvl);
   /* FIXME: the physical page is leaked if the pool address space is full */
   return(NULL);
}
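
#if 0
/*
 * Editor's sketch (not part of the original source, kept out of the build):
 * how a caller might use the single-page interface.  The page comes back
 * mapped PAGE_READWRITE inside the pool region and is returned with
 * ExUnmapPage, which clears the mapping and the bitmap slot.
 */
static VOID ExamplePageUse(VOID)
{
   PVOID Page = ExAllocatePage();
   if (Page == NULL)
     {
        return;         /* out of physical memory or pool address space */
     }
   memset(Page, 0, PAGESIZE);
   ExUnmapPage(Page);
}
#endif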

VOID ExInitNonPagedPool(ULONG BaseAddress)
/*
 * FUNCTION: Initializes the nonpaged pool
 * ARGUMENTS:
 *          BaseAddress = base of the region reserved for the pool
 */
{
   kernel_pool_base = BaseAddress;
   KeInitializeSpinLock(&AllocMapLock);
   KeInitializeSpinLock(&MmNpoolLock);
}

#if 0
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 */
{
   block_hdr* current=free_list_head;
   unsigned int blocks_seen=0;
   
   while (current!=NULL)
     {
        unsigned int base_addr = (int)current;
        
        if (current->magic != BLOCK_HDR_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        
        if (base_addr < (kernel_pool_base) ||
            (base_addr+current->size) >
            (kernel_pool_base)+NONPAGED_POOL_SIZE)
          {
             DbgPrint("Block %x found outside pool area\n",current);
             DbgPrint("Size %d\n",current->size);
             DbgPrint("Limits are %x %x\n",kernel_pool_base,
                      kernel_pool_base+NONPAGED_POOL_SIZE);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        blocks_seen++;
        if (blocks_seen > nr_free_blocks)
          {
             DbgPrint("Too many blocks on list\n");
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
//      verify_for_write(base_addr,current->size);
        if (current->next!=NULL&&current->next->previous!=current)
          {
             DbgPrint("%s:%d:Break in list (current %x next %x "
                      "current->next->previous %x)\n",
                      __FILE__,__LINE__,current,current->next,
                      current->next->previous);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        current=current->next;
     }
}

static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 */
{
   block_hdr* current=used_list_head;
   unsigned int blocks_seen=0;
   
   while (current!=NULL)
     {
        unsigned int base_addr = (int)current;
        
        if (current->magic != BLOCK_HDR_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        if (base_addr < (kernel_pool_base) ||
            (base_addr+current->size) >
            (kernel_pool_base)+NONPAGED_POOL_SIZE)
          {
             DbgPrint("Block %x found outside pool area\n",current);
             for(;;);
          }
        blocks_seen++;
        if (blocks_seen > EiNrUsedBlocks)
          {
             DbgPrint("Too many blocks on list\n");
             for(;;);
          }
//      verify_for_write(base_addr,current->size);
        if (current->next!=NULL&&current->next->previous!=current)
          {
             DbgPrint("Break in list (current %x next %x)\n",
                      current,current->next);
             for(;;);
          }
        current=current->next;
     }
}

static void check_duplicates(block_hdr* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if duplicates are found
 */
{
   unsigned int base = (int)blk;
   unsigned int last = ((int)blk) + sizeof(block_hdr) + blk->size;
   
   block_hdr* current=free_list_head;
   while (current!=NULL)
     {
        if (current->magic != BLOCK_HDR_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        
        if ( (int)current > base && (int)current < last )
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }
        if ( (int)current < base &&
             ((int)current + current->size + sizeof(block_hdr)) > base )
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }
        current=current->next;
     }
   current=used_list_head;
   while (current!=NULL)
     {
        if ( (int)current > base && (int)current < last )
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }
        if ( (int)current < base &&
             ((int)current + current->size + sizeof(block_hdr)) > base )
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }
        current=current->next;
     }
}

static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 */
{
   block_hdr* current=NULL;
   
   validate_free_list();
   validate_used_list();
   
   current=free_list_head;
   while (current!=NULL)
     {
        check_duplicates(current);
        current=current->next;
     }
   current=used_list_head;
   while (current!=NULL)
     {
        check_duplicates(current);
        current=current->next;
     }
}
#endif

static void add_to_free_list(block_hdr* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 */
{
   blk->next=free_list_head;
   blk->previous=NULL;
   if (free_list_head!=NULL)
     {
        free_list_head->previous=blk;
     }
   free_list_head=blk;
   nr_free_blocks++;
}

static void add_to_used_list(block_hdr* blk)
/*
 * FUNCTION: add the block to the used list (internal)
 */
{
   blk->next=used_list_head;
   blk->previous=NULL;
   if (used_list_head!=NULL)
     {
        used_list_head->previous=blk;
     }
   used_list_head=blk;
   EiNrUsedBlocks++;
}
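
/*
 * Editor's note (illustrative, not from the original source): both helpers
 * push the block onto the head of a doubly linked list in O(1):
 *
 *    before:  head -> A -> B        after:  head -> blk -> A -> B
 *
 * nr_free_blocks and EiNrUsedBlocks track the list lengths so that the
 * validation code above can spot over-long (e.g. cyclic) lists.
 */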

static void remove_from_free_list(block_hdr* current)
/*
 * FUNCTION: unlink the block from the free list (internal)
 */
{
   if (current->next==NULL&&current->previous==NULL)
     {
        free_list_head=NULL;
     }
   else
     {
        if (current->next==NULL)
          {
             current->previous->next=NULL;
          }
        else if (current->previous==NULL)
          {
             current->next->previous=NULL;
             free_list_head=current->next;
          }
        else
          {
             current->next->previous=current->previous;
             current->previous->next=current->next;
          }
     }
   nr_free_blocks--;
}

static void remove_from_used_list(block_hdr* current)
/*
 * FUNCTION: unlink the block from the used list (internal)
 */
{
   if (current->next==NULL&&current->previous==NULL)
     {
        used_list_head=NULL;
     }
   else
     {
        if (current->previous==NULL)
          {
             current->next->previous=NULL;
             used_list_head=current->next;
          }
        else
          {
             current->previous->next=current->next;
          }
        if (current->next!=NULL)
          {
             current->next->previous=current->previous;
          }
        else
          {
             current->previous->next=NULL;
          }
     }
   EiNrUsedBlocks--;
}

inline static void* block_to_address(block_hdr* blk)
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
   return ( (void *) ((int)blk + sizeof(block_hdr)) );
}

inline static block_hdr* address_to_block(void* addr)
/*
 * FUNCTION: Translate a block address back to its header (internal); the
 * inverse of block_to_address
 */
{
   return (block_hdr *)
          ( ((int)addr) - sizeof(block_hdr) );
}
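
#if 0
/*
 * Editor's sketch (not part of the original source, kept out of the build):
 * the two translations are exact inverses, so any pointer handed out by the
 * allocator can be walked back to its header.
 */
static void example_round_trip(block_hdr* blk)
{
   void* addr = block_to_address(blk);
   if (address_to_block(addr) != blk)
     {
        DbgPrint("block_to_address/address_to_block are not inverses\n");
     }
}
#endif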

static unsigned int alloc_pool_region(unsigned int nr_pages)
/*
 * FUNCTION: Allocates a region of pages within the nonpaged pool area
 */
{
   unsigned int start = 0;
   unsigned int length = 0;
   unsigned int i,j;
   
   OLD_DPRINT("alloc_pool_region(nr_pages = %d)\n",nr_pages);
   
   for (i=1; i<ALLOC_MAP_SIZE;i++)
     {
        if (!test_bit(i%32,&alloc_map[i/32]))
          {
             if (length == 0)
               {
                  start=i;
                  length = 1;
               }
             else
               {
                  length++;
               }
             if (length==nr_pages)
               {
                  OLD_DPRINT("found region at %d for %d\n",start,
                             length);
                  for (j=start;j<(start+length);j++)
                    {
                       set_bit(j%32,&alloc_map[j/32]);
                    }
                  OLD_DPRINT("returning %x\n",(start*PAGESIZE)
                             +kernel_pool_base);
                  return((start*PAGESIZE)+kernel_pool_base);
               }
          }
        else
          {
             start=0;
             length=0;
          }
     }
   DbgPrint("CRITICAL: Out of non-paged pool space\n");
   for(;;);
   return(0);
}
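
/*
 * Editor's note (illustrative, not from the original source): the scan is a
 * plain first-fit over the bitmap, restarting the run after every set bit.
 * With pages 1 and 2 in use and nr_pages == 3:
 *
 *    page:   1 2 3 4 5
 *    state:  X X . . .    =>  start = 3, length reaches 3 at page 5,
 *                             returns kernel_pool_base + 3*PAGESIZE
 *
 * The scan starts at bit 1, so the first pool page is never handed out.
 */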

static block_hdr* grow_kernel_pool(unsigned int size)
/*
 * FUNCTION: Grow the executive heap to accommodate a block of at least
 * 'size' bytes
 */
{
   unsigned int total_size = size + sizeof(block_hdr);
   unsigned int nr_pages = PAGE_ROUND_UP(total_size) / PAGESIZE;
   unsigned int start = alloc_pool_region(nr_pages);
   block_hdr* used_blk=NULL;
   block_hdr* free_blk=NULL;
   int i;
   
   OLD_DPRINT("growing heap for block size %d, ",size);
   OLD_DPRINT("start %x\n",start);
   
   for (i=0;i<nr_pages;i++)
     {
        MmSetPage(NULL,
                  (PVOID)(start + (i*PAGESIZE)),
                  PAGE_READWRITE,
                  (ULONG)MmAllocPage());
     }
   
   /*
    * If enough of the region is left over, split it into a used block for
    * the caller and a free block for the list. The leftover is computed
    * directly rather than as PAGESIZE-(total_size%PAGESIZE), which would
    * wrongly report a full page of slack when total_size is an exact
    * multiple of PAGESIZE.
    */
   if (((nr_pages * PAGESIZE) - total_size) > (2*sizeof(block_hdr)))
     {
        used_blk = (struct _block_hdr *)start;
        OLD_DPRINT("Creating block at %x\n",start);
        used_blk->magic = BLOCK_HDR_MAGIC;
        used_blk->size = size;
        add_to_used_list(used_blk);
        
        free_blk = (block_hdr *)(start + sizeof(block_hdr) + size);
        OLD_DPRINT("Creating block at %x\n",free_blk);
        free_blk->magic = BLOCK_HDR_MAGIC;
        free_blk->size = (nr_pages * PAGESIZE) -((sizeof(block_hdr)*2) + size);
        add_to_free_list(free_blk);
        
        EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->size;
        EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->size;
     }
   else
     {
        used_blk = (struct _block_hdr *)start;
        used_blk->magic = BLOCK_HDR_MAGIC;
        /* size is the payload only; it must not include our own header */
        used_blk->size = (nr_pages * PAGESIZE) - sizeof(block_hdr);
        add_to_used_list(used_blk);
        
        EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->size;
     }
   
   VALIDATE_POOL;
   return(used_blk);
}
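
/*
 * Editor's worked example (not from the original source), assuming a 20-byte
 * block_hdr (five 32-bit fields) and PAGESIZE == 4096: a request of
 * size == 100 gives total_size == 120, which rounds up to one page.  The
 * leftover 4096 - 120 == 3976 bytes exceed two headers, so the page is split
 * into a used block of 100 bytes and a free block of
 * 4096 - 2*20 - 100 == 3956 bytes.
 */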

static void* take_block(block_hdr* current, unsigned int size)
/*
 * FUNCTION: Allocate a used block of at least 'size' from the specified
 * free block
 * RETURNS: The address of the created memory block
 */
{
   /*
    * If the block is much bigger than required then split it and
    * return a pointer to the allocated section. If the difference
    * between the sizes is marginal it makes no sense to have the
    * extra overhead
    */
   if (current->size > (1 + size + sizeof(block_hdr)))
     {
        block_hdr* free_blk;
        
        EiFreeNonPagedPool = EiFreeNonPagedPool - current->size;
        
        /*
         * Replace the bigger block with a smaller block in the
         * same position in the list
         */
        free_blk = (block_hdr *)(((int)current)
                                 + sizeof(block_hdr) + size);
        free_blk->magic = BLOCK_HDR_MAGIC;
        free_blk->next = current->next;
        free_blk->previous = current->previous;
        if (current->next)
          {
             current->next->previous = free_blk;
          }
        if (current->previous)
          {
             current->previous->next = free_blk;
          }
        free_blk->size = current->size - (sizeof(block_hdr) + size);
        if (current==free_list_head)
          {
             free_list_head=free_blk;
          }
        
        current->size=size;
        add_to_used_list(current);
        
        EiUsedNonPagedPool = EiUsedNonPagedPool + current->size;
        EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->size;
        
        VALIDATE_POOL;
        return(block_to_address(current));
     }
   
   /*
    * Otherwise allocate the whole block
    */
   remove_from_free_list(current);
   add_to_used_list(current);
   
   EiFreeNonPagedPool = EiFreeNonPagedPool - current->size;
   EiUsedNonPagedPool = EiUsedNonPagedPool + current->size;
   
   VALIDATE_POOL;
   return(block_to_address(current));
}
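
/*
 * Editor's worked example (not from the original source), again assuming a
 * 20-byte block_hdr: taking size == 100 from a free block of 200 bytes
 * satisfies 200 > 1 + 100 + 20, so the block is split and a free block of
 * 200 - (20 + 100) == 80 bytes stays on the list.  Taking size == 100 from
 * a free block of 110 bytes fails the test, so all 110 bytes are handed out
 * and the small remainder is absorbed into the allocation.
 */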

asmlinkage VOID ExFreePool(PVOID block)
/*
 * FUNCTION: Releases previously allocated memory
 * ARGUMENTS:
 *        block = block to free
 */
{
   block_hdr* blk=address_to_block(block);
   KIRQL oldIrql;
   
   OLD_DPRINT("(%s:%d) freeing block %x\n",__FILE__,__LINE__,blk);
   
   POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
              ((PULONG)&block)[-1]);
   
   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
   
   /*
    * Validate the header before trusting blk->size below; a corrupt header
    * would otherwise drive the poisoning memset with a garbage length
    */
   if (blk->magic != BLOCK_HDR_MAGIC)
     {
        DbgPrint("ExFreePool of non-allocated address %x\n",block);
        KeBugCheck(0);
        return;
     }
   
   /* Poison the freed payload to catch use-after-free */
   memset(block, 0xcc, blk->size);
   
   VALIDATE_POOL;
   
   /*
    * Please don't change the order
    */
   remove_from_used_list(blk);
   add_to_free_list(blk);
   
   EiUsedNonPagedPool = EiUsedNonPagedPool - blk->size;
   EiFreeNonPagedPool = EiFreeNonPagedPool + blk->size;
   
   VALIDATE_POOL;
   
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}

PVOID STDCALL ExAllocateNonPagedPoolWithTag(ULONG type,
                                            ULONG size,
                                            ULONG Tag,
                                            PVOID Caller)
/*
 * FUNCTION: Allocates a block of at least 'size' bytes from the nonpaged
 * pool, using a best-fit search of the free list
 * NOTE: The Tag argument is currently not recorded in the block header
 */
{
   block_hdr* current = NULL;
   PVOID block;
   block_hdr* best = NULL;
   KIRQL oldIrql;
   
   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              size,Caller);
   
   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
   
//   DbgPrint("Blocks on free list %d\n",nr_free_blocks);
//   DbgPrint("Blocks on used list %d\n",eiNrUsedblocks);
//   OLD_DPRINT("ExAllocateNonPagedPool(type %d, size %d)\n",type,size);
   VALIDATE_POOL;
   
   /*
    * accommodate this useful idiom
    */
   if (size==0)
     {
        POOL_TRACE("= NULL\n");
        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
        return(NULL);
     }
   
   /*
    * Look for an already created block of sufficient size
    */
   current=free_list_head;
   
//   defrag_free_list();
   
   while (current!=NULL)
     {
        OLD_DPRINT("current %x size %x next %x\n",current,current->size,
                   current->next);
        if (current->size>=size &&
            (best == NULL ||
             current->size < best->size))
          {
             best = current;
          }
        current=current->next;
     }
   if (best != NULL)
     {
        OLD_DPRINT("found block %x of size %d\n",best,size);
        block=take_block(best,size);
        VALIDATE_POOL;
        memset(block,0,size);
        POOL_TRACE("= %x\n",block);
        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
        return(block);
     }
   
   /*
    * Otherwise create a new block
    */
   block=block_to_address(grow_kernel_pool(size));
   VALIDATE_POOL;
   memset(block,0,size);
   POOL_TRACE("= %x\n",block);
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   return(block);
}
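
#if 0
/*
 * Editor's usage sketch (not part of the original source, kept out of the
 * build): a typical allocate/free pair.  The returned memory is zeroed by
 * the allocator.  The tag value 0x31307845 ('Ex01') and the type and caller
 * arguments are hypothetical, chosen only for illustration.
 */
static VOID ExamplePoolUse(VOID)
{
   PVOID Buffer = ExAllocateNonPagedPoolWithTag(0, 256, 0x31307845, NULL);
   if (Buffer == NULL)
     {
        return;
     }
   /* ... use the zero-initialized 256-byte buffer ... */
   ExFreePool(Buffer);
}
#endif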

/* EOF */