/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/npool.c
 * PURPOSE:     Implements the kernel nonpaged memory pool
 * PROGRAMMER:  David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *              27/05/98: Created
 *              10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
 *                        in take_block (if current bigger than required)
 *                        in remove_from_used_list
 *                        in ExFreePool
 *              23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
 */

/* INCLUDES ****************************************************************/

#include <ddk/ntddk.h>
#include <string.h>
#include <internal/string.h>
#include <internal/stddef.h>
#include <internal/mm.h>
#include <internal/mmhal.h>
#include <internal/bitops.h>
#include <internal/ntoskrnl.h>

#define NDEBUG
#include <internal/debug.h>

#if 0
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
/* No trailing semicolon after while(0), so the macro expands safely as a
 * single statement inside if/else bodies */
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0)
#else
#define POOL_TRACE(args...)
#endif

/* TYPES *******************************************************************/

#define BLOCK_HDR_MAGIC (0xdeadbeef)

/*
 * fields present at the start of a block (this is for internal use only)
 */
typedef struct _block_hdr
{
   ULONG magic;
   ULONG size;
   struct _block_hdr* previous;
   struct _block_hdr* next;
   ULONG tag;
} block_hdr;
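
/*
 * Layout sketch (illustrative, not part of the original source): every
 * allocation is a block_hdr immediately followed by the caller-visible
 * data, so assuming i386 with 4-byte ULONGs and pointers and no padding
 * the header occupies 20 bytes:
 *
 *      block_hdr                            user data
 *   +-------+------+----------+------+-----+------------------+
 *   | magic | size | previous | next | tag | size bytes ...   |
 *   +-------+------+----------+------+-----+------------------+
 *   ^                                      ^
 *   block_hdr* blk                         block_to_address(blk)
 */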

/* GLOBALS *****************************************************************/

/*
 * Base address of the nonpaged pool region, initialized by the memory
 * manager
 */
static unsigned int kernel_pool_base = 0;

/*
 * Pointer to the first block in the free list
 */
static block_hdr* free_list_head = NULL;
static block_hdr* used_list_head = NULL;
static ULONG nr_free_blocks;
ULONG EiNrUsedBlocks = 0;

#define ALLOC_MAP_SIZE (NONPAGED_POOL_SIZE / PAGESIZE)

/*
 * One bit for each page in the kmalloc region
 * If set then the page is used by a kmalloc block
 */
static unsigned int alloc_map[ALLOC_MAP_SIZE/32]={0,};
static KSPIN_LOCK AllocMapLock;
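
/*
 * Example (illustrative, not from the original source): page index i of the
 * pool corresponds to bit (i % 32) of alloc_map[i / 32], and to the virtual
 * address kernel_pool_base + i * PAGESIZE. With PAGESIZE == 4096, page 33
 * is bit 1 of alloc_map[1] and lives at kernel_pool_base + 0x21000.
 */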

unsigned int EiFreeNonPagedPool = 0;
unsigned int EiUsedNonPagedPool = 0;

/* FUNCTIONS ***************************************************************/

VOID ExUnmapPage(PVOID Addr)
/*
 * FUNCTION: Unmaps a page within the nonpaged pool area and marks it free
 * in the allocation bitmap
 */
{
   KIRQL oldIrql;
   ULONG i = ((ULONG)Addr - kernel_pool_base) / PAGESIZE;

   DPRINT("ExUnmapPage(Addr %x)\n",Addr);
   DPRINT("i %x\n",i);

   KeAcquireSpinLock(&AllocMapLock, &oldIrql);
   MmSetPage(NULL, (PVOID)Addr, 0, 0);
   clear_bit(i%32, &alloc_map[i/32]);
   KeReleaseSpinLock(&AllocMapLock, oldIrql);
}

PVOID ExAllocatePage(VOID)
/*
 * FUNCTION: Allocates a physical page, maps it at the first free page slot
 * in the nonpaged pool area and returns its virtual address
 */
{
   KIRQL oldlvl;
   ULONG addr;
   ULONG i;
   ULONG PhysPage;

   PhysPage = (ULONG)MmAllocPage();
   DPRINT("Allocated page %x\n",PhysPage);
   if (PhysPage == 0)
   {
      return(NULL);
   }

   KeAcquireSpinLock(&AllocMapLock, &oldlvl);
   for (i=1; i<ALLOC_MAP_SIZE; i++)
   {
      if (!test_bit(i%32, &alloc_map[i/32]))
      {
         DPRINT("i %x\n",i);
         set_bit(i%32, &alloc_map[i/32]);
         addr = kernel_pool_base + (i*PAGESIZE);
         MmSetPage(NULL, (PVOID)addr, PAGE_READWRITE, PhysPage);
         KeReleaseSpinLock(&AllocMapLock, oldlvl);
         return((PVOID)addr);
      }
   }
   KeReleaseSpinLock(&AllocMapLock, oldlvl);
   return(NULL);
}
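
#if 0
/*
 * Usage sketch (illustrative only, in the spirit of the #if 0 blocks in
 * this file): ExAllocatePage/ExUnmapPage manage single pool pages. Note
 * that ExUnmapPage only releases the mapping and the pool slot, not the
 * physical page itself.
 */
static VOID ExPagePairExample(VOID)
{
   PVOID Page = ExAllocatePage();
   if (Page != NULL)
   {
      memset(Page, 0, PAGESIZE);   /* the page is mapped read/write */
      ExUnmapPage(Page);
   }
}
#endif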

VOID ExInitNonPagedPool(ULONG BaseAddress)
/*
 * FUNCTION: Initializes the nonpaged pool to start at the given virtual
 * base address
 */
{
   kernel_pool_base = BaseAddress;
   KeInitializeSpinLock(&AllocMapLock);
}

#if 0
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 */
{
   block_hdr* current=free_list_head;
   unsigned int blocks_seen=0;

   while (current!=NULL)
   {
      unsigned int base_addr = (int)current;

      if (current->magic != BLOCK_HDR_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
      }

      if (base_addr < (kernel_pool_base) ||
          (base_addr+current->size) > (kernel_pool_base)+NONPAGED_POOL_SIZE)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->size);
         DbgPrint("Limits are %x %x\n",kernel_pool_base,
                  kernel_pool_base+NONPAGED_POOL_SIZE);
         KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
      }
      blocks_seen++;
      if (blocks_seen > nr_free_blocks)
      {
         DbgPrint("Too many blocks on list\n");
         KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
      }
//    verify_for_write(base_addr,current->size);
      if (current->next!=NULL && current->next->previous!=current)
      {
         DbgPrint("%s:%d:Break in list (current %x next %x "
                  "current->next->previous %x)\n",
                  __FILE__,__LINE__,current,current->next,
                  current->next->previous);
         KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
      }
      current=current->next;
   }
}

static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 */
{
   block_hdr* current=used_list_head;
   unsigned int blocks_seen=0;

   while (current!=NULL)
   {
      unsigned int base_addr = (int)current;

      if (current->magic != BLOCK_HDR_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
      }
      if (base_addr < (kernel_pool_base) ||
          (base_addr+current->size) > (kernel_pool_base)+NONPAGED_POOL_SIZE)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         for(;;);
      }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
      {
         DbgPrint("Too many blocks on list\n");
         for(;;);
      }
//    verify_for_write(base_addr,current->size);
      if (current->next!=NULL && current->next->previous!=current)
      {
         DbgPrint("Break in list (current %x next %x)\n",
                  current,current->next);
         for(;;);
      }
      current=current->next;
   }
}

static void check_duplicates(block_hdr* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *          blk = block to check
 * NOTE: Bug checks if duplicates are found
 */
{
   unsigned int base = (int)blk;
   unsigned int last = ((int)blk) + sizeof(block_hdr) + blk->size;

   block_hdr* current=free_list_head;
   while (current!=NULL)
   {
      if (current->magic != BLOCK_HDR_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
      }

      if ( (int)current > base && (int)current < last )
      {
         DbgPrint("intersecting blocks on list\n");
         for(;;);
      }
      if ( (int)current < base &&
           ((int)current + current->size + sizeof(block_hdr)) > base )
      {
         DbgPrint("intersecting blocks on list\n");
         for(;;);
      }
      current=current->next;
   }
   current=used_list_head;
   while (current!=NULL)
   {
      if ( (int)current > base && (int)current < last )
      {
         DbgPrint("intersecting blocks on list\n");
         for(;;);
      }
      if ( (int)current < base &&
           ((int)current + current->size + sizeof(block_hdr)) > base )
      {
         DbgPrint("intersecting blocks on list\n");
         for(;;);
      }
      current=current->next;
   }
}

static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 */
{
   block_hdr* current=NULL;

   validate_free_list();
   validate_used_list();

   current=free_list_head;
   while (current!=NULL)
   {
      check_duplicates(current);
      current=current->next;
   }
   current=used_list_head;
   while (current!=NULL)
   {
      check_duplicates(current);
      current=current->next;
   }
}
#endif

static void add_to_free_list(block_hdr* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 */
{
   blk->next=free_list_head;
   blk->previous=NULL;
   if (free_list_head!=NULL)
   {
      free_list_head->previous=blk;
   }
   free_list_head=blk;
   nr_free_blocks++;
}

static void add_to_used_list(block_hdr* blk)
/*
 * FUNCTION: add the block to the used list (internal)
 */
{
   blk->next=used_list_head;
   blk->previous=NULL;
   if (used_list_head!=NULL)
   {
      used_list_head->previous=blk;
   }
   used_list_head=blk;
   EiNrUsedBlocks++;
}

static void remove_from_free_list(block_hdr* current)
/*
 * FUNCTION: remove the block from the free list (internal)
 */
{
   if (current->next==NULL && current->previous==NULL)
   {
      free_list_head=NULL;
   }
   else
   {
      if (current->next==NULL)
      {
         current->previous->next=NULL;
      }
      else if (current->previous==NULL)
      {
         current->next->previous=NULL;
         free_list_head=current->next;
      }
      else
      {
         current->next->previous=current->previous;
         current->previous->next=current->next;
      }
   }
   nr_free_blocks--;
}

static void remove_from_used_list(block_hdr* current)
/*
 * FUNCTION: remove the block from the used list (internal)
 */
{
   if (current->next==NULL && current->previous==NULL)
   {
      used_list_head=NULL;
   }
   else
   {
      if (current->previous==NULL)
      {
         current->next->previous=NULL;
         used_list_head=current->next;
      }
      else
      {
         current->previous->next=current->next;
      }
      if (current->next!=NULL)
      {
         current->next->previous=current->previous;
      }
      else
      {
         current->previous->next=NULL;
      }
   }
   EiNrUsedBlocks--;
}

inline static void* block_to_address(block_hdr* blk)
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
   return ( (void *) ((int)blk + sizeof(block_hdr)) );
}

inline static block_hdr* address_to_block(void* addr)
/*
 * FUNCTION: Translate a block address to the corresponding block header
 * address (internal)
 */
{
   return (block_hdr *)( ((int)addr) - sizeof(block_hdr) );
}
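
/*
 * Example (illustrative, not from the original source): these two helpers
 * are exact inverses. Assuming sizeof(block_hdr) == 20 on i386, a header
 * at 0xc1000000 gives block_to_address(blk) == 0xc1000014, and
 * address_to_block(0xc1000014) recovers 0xc1000000. ExFreePool relies on
 * this arithmetic to find the header, and on blk->magic to reject pointers
 * that were never returned by the allocator.
 */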

static unsigned int alloc_pool_region(unsigned int nr_pages)
/*
 * FUNCTION: Allocates a region of pages within the nonpaged pool area
 */
{
   unsigned int start = 0;
   unsigned int length = 0;
   unsigned int i,j;

   OLD_DPRINT("alloc_pool_region(nr_pages = %d)\n",nr_pages);

   for (i=1; i<ALLOC_MAP_SIZE; i++)
   {
      if (!test_bit(i%32, &alloc_map[i/32]))
      {
         if (length == 0)
         {
            start=i;
            length = 1;
         }
         else
         {
            length++;
         }
         if (length==nr_pages)
         {
            OLD_DPRINT("found region at %d for %d\n",start,length);
            for (j=start; j<(start+length); j++)
            {
               set_bit(j%32, &alloc_map[j/32]);
            }
            OLD_DPRINT("returning %x\n",(start*PAGESIZE)+kernel_pool_base);
            return((start*PAGESIZE)+kernel_pool_base);
         }
      }
      else
      {
         start=0;
         length=0;
      }
   }
   DbgPrint("CRITICAL: Out of non-paged pool space\n");
   for(;;);
   return(0);
}

static block_hdr* grow_kernel_pool(unsigned int size)
/*
 * FUNCTION: Grow the executive heap to accommodate a block of at least
 * 'size' bytes
 */
{
   unsigned int total_size = size + sizeof(block_hdr);
   unsigned int nr_pages = PAGE_ROUND_UP(total_size) / PAGESIZE;
   unsigned int start = alloc_pool_region(nr_pages);
   block_hdr* used_blk=NULL;
   block_hdr* free_blk=NULL;
   int i;

   OLD_DPRINT("growing heap for block size %d, ",size);
   OLD_DPRINT("start %x\n",start);

   for (i=0; i<nr_pages; i++)
   {
      MmSetPage(NULL,
                (PVOID)(start + (i*PAGESIZE)),
                PAGE_READWRITE,
                (ULONG)MmAllocPage());
   }

   if ((PAGESIZE-(total_size%PAGESIZE)) > (2*sizeof(block_hdr)))
   {
      used_blk = (struct _block_hdr *)start;
      OLD_DPRINT("Creating block at %x\n",start);
      used_blk->magic = BLOCK_HDR_MAGIC;
      used_blk->size = size;
      add_to_used_list(used_blk);

      free_blk = (block_hdr *)(start + sizeof(block_hdr) + size);
      OLD_DPRINT("Creating block at %x\n",free_blk);
      free_blk->magic = BLOCK_HDR_MAGIC;
      free_blk->size = (nr_pages * PAGESIZE) - ((sizeof(block_hdr)*2) + size);
      add_to_free_list(free_blk);

      EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->size;
      EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->size;
   }
   else
   {
      used_blk = (struct _block_hdr *)start;
      used_blk->magic = BLOCK_HDR_MAGIC;
      /* the header itself occupies part of the region, so the usable size
       * is the region size minus the header */
      used_blk->size = (nr_pages * PAGESIZE) - sizeof(block_hdr);
      add_to_used_list(used_blk);

      EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->size;
   }

   VALIDATE_POOL;
   return(used_blk);
}
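
/*
 * Worked example (illustrative, assuming PAGESIZE == 4096 and a 20-byte
 * block_hdr): a request for size == 100 gives total_size == 120 and
 * nr_pages == 1. The leftover 4096 - 120 == 3976 bytes exceed
 * 2*sizeof(block_hdr), so the page is split into a used block of 100 data
 * bytes and a free block of 4096 - (2*20 + 100) == 3956 data bytes.
 */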

static void* take_block(block_hdr* current, unsigned int size)
/*
 * FUNCTION: Allocate a used block of at least 'size' from the specified
 * free block
 * RETURNS: The address of the created memory block
 */
{
   /*
    * If the block is much bigger than required then split it and
    * return a pointer to the allocated section. If the difference
    * between the sizes is marginal it makes no sense to have the
    * extra overhead
    */
   if (current->size > (1 + size + sizeof(block_hdr)))
   {
      block_hdr* free_blk;

      EiFreeNonPagedPool = EiFreeNonPagedPool - current->size;

      /*
       * Replace the bigger block with a smaller block in the
       * same position in the list
       */
      free_blk = (block_hdr *)(((int)current) + sizeof(block_hdr) + size);
      free_blk->magic = BLOCK_HDR_MAGIC;
      free_blk->next = current->next;
      free_blk->previous = current->previous;
      if (current->next)
      {
         current->next->previous = free_blk;
      }
      if (current->previous)
      {
         current->previous->next = free_blk;
      }
      free_blk->size = current->size - (sizeof(block_hdr) + size);
      if (current==free_list_head)
      {
         free_list_head=free_blk;
      }

      current->size=size;
      add_to_used_list(current);

      EiUsedNonPagedPool = EiUsedNonPagedPool + current->size;
      EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->size;

      VALIDATE_POOL;
      return(block_to_address(current));
   }

   /*
    * Otherwise allocate the whole block
    */
   remove_from_free_list(current);
   add_to_used_list(current);

   EiFreeNonPagedPool = EiFreeNonPagedPool - current->size;
   EiUsedNonPagedPool = EiUsedNonPagedPool + current->size;

   VALIDATE_POOL;
   return(block_to_address(current));
}
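
/*
 * Split example (illustrative, assuming a 20-byte block_hdr): taking 50
 * bytes from a free block with size == 200 satisfies 200 > 1 + 50 + 20,
 * so the block is split in place. The used block keeps the original header
 * with size == 50, and a new free header is written at offset
 * 20 + 50 == 70, carrying size == 200 - (20 + 50) == 130.
 */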

asmlinkage VOID ExFreePool(PVOID block)
/*
 * FUNCTION: Releases previously allocated memory
 * ARGUMENTS:
 *        block = block to free
 */
{
   block_hdr* blk=address_to_block(block);
   OLD_DPRINT("(%s:%d) freeing block %x\n",__FILE__,__LINE__,blk);

   POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
              ((PULONG)&block)[-1]);

   VALIDATE_POOL;

   if (blk->magic != BLOCK_HDR_MAGIC)
   {
      DbgPrint("ExFreePool of non-allocated address %x\n",block);
      KeBugCheck(0);
      return;
   }

   /*
    * Please don't change the order
    */
   remove_from_used_list(blk);
   add_to_free_list(blk);

   EiUsedNonPagedPool = EiUsedNonPagedPool - blk->size;
   EiFreeNonPagedPool = EiFreeNonPagedPool + blk->size;

   VALIDATE_POOL;
}

PVOID ExAllocateNonPagedPoolWithTag(ULONG type,
                                    ULONG size,
                                    ULONG Tag,
                                    PVOID Caller)
/*
 * FUNCTION: Allocates a zero-initialized block from the nonpaged pool,
 * using a best-fit search of the free list and growing the pool if no
 * free block is large enough
 */
{
   block_hdr* current = NULL;
   PVOID block;
   block_hdr* best = NULL;

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              size,Caller);

// DbgPrint("Blocks on free list %d\n",nr_free_blocks);
// DbgPrint("Blocks on used list %d\n",EiNrUsedBlocks);
// OLD_DPRINT("ExAllocateNonPagedPool(type %d, size %d)\n",type,size);
   VALIDATE_POOL;

   /*
    * accommodate this useful idiom
    */
   if (size==0)
   {
      POOL_TRACE("= NULL\n");
      return(NULL);
   }

   /*
    * Look for an already created block of sufficient size
    */
   current=free_list_head;

// defrag_free_list();

   while (current!=NULL)
   {
      OLD_DPRINT("current %x size %x next %x\n",current,current->size,
                 current->next);
      if (current->size>=size &&
          (best == NULL || current->size < best->size))
      {
         best = current;
      }
      current=current->next;
   }
   if (best != NULL)
   {
      OLD_DPRINT("found block %x of size %d\n",best,size);
      block=take_block(best,size);
      VALIDATE_POOL;
      memset(block,0,size);
      POOL_TRACE("= %x\n",block);
      return(block);
   }

   /*
    * Otherwise create a new block
    */
   block=block_to_address(grow_kernel_pool(size));
   VALIDATE_POOL;
   memset(block,0,size);
   POOL_TRACE("= %x\n",block);
   return(block);
}
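
#if 0
/*
 * Usage sketch (illustrative only, in the spirit of the #if 0 blocks in
 * this file): 'type' and 'Caller' are only consulted by the tracing above,
 * and the 'Pool' tag value below is just an example. The returned block is
 * zero-filled by the allocator.
 */
static VOID ExPoolUsageExample(VOID)
{
   PVOID Buffer = ExAllocateNonPagedPoolWithTag(0, 256,
                                                0x6c6f6f50 /* 'Pool' */,
                                                NULL);
   if (Buffer != NULL)
   {
      /* use the 256 zero-initialized bytes, then release them */
      ExFreePool(Buffer);
   }
}
#endif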