Initial revision
[reactos.git] / reactos / ntoskrnl / mm / pool.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/pool.c
5 * PURPOSE: Implements the kernel memory pool
6 * PROGRAMMER: David Welch (welch@mcmail.com)
7 * UPDATE HISTORY:
8 * 27/05/98: Created
9 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
10 * in take_block (if current bigger than required)
11 * in remove_from_used_list
12 * in ExFreePool
13 */
14
15 /* INCLUDES ****************************************************************/
16
17 #include <internal/stddef.h>
18 #include <internal/mm.h>
19 #include <internal/hal/page.h>
20 #include <internal/pool.h>
21 #include <internal/bitops.h>
22 #include <internal/kernel.h>
23
24 #define NDEBUG
25 #include <internal/debug.h>
26
27 #include <ddk/ntddk.h>
28
29 /* TYPES *******************************************************************/
30
31 /*
32 * fields present at the start of a block (this is for internal use only)
33 */
/*
 * Header prepended to every pool block, free or used (internal use only).
 * The same header doubles as the link node for the free/used lists.
 */
typedef struct _block_hdr
{
    unsigned int size;            /* size in bytes of the data area; by convention
                                     excludes this header (but see grow_kernel_pool's
                                     no-split branch — NOTE(review): verify) */
    struct _block_hdr* previous;  /* previous block on the free or used list */
    struct _block_hdr* next;      /* next block on the free or used list */
} block_hdr;
40
41 /* GLOBALS *****************************************************************/
42
43 /*
44 * Memory managment initalized symbol for the base of the pool
45 */
46 extern unsigned int kernel_pool_base;
47
48 /*
49 * Pointer to the first block in the free list
50 */
51 static block_hdr* free_list_head = NULL;
52 static block_hdr* used_list_head = NULL;
53 static unsigned int nr_free_blocks = 0;
54 static unsigned int nr_used_blocks = 0;
55
56 #define ALLOC_MAP_SIZE (NONPAGED_POOL_SIZE / PAGESIZE)
57
58 /*
59 * One bit for each page in the kmalloc region
60 * If set then the page is used by a kmalloc block
61 */
62 static unsigned int alloc_map[ALLOC_MAP_SIZE/32]={0,};
63
64 /* FUNCTIONS ***************************************************************/
65
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 * NOTE: Bug checks with KBUG_POOL_FREE_LIST_CORRUPT if a block lies
 * outside the pool region, if more blocks are chained than
 * nr_free_blocks records (cycle or stray link), or if a block's
 * successor does not point back at it.
 */
{
    block_hdr* current=free_list_head;
    unsigned int blocks_seen=0;

    while (current!=NULL)
    {
        unsigned int base_addr = (int)current;
        /* the block (header address + data size) must lie inside the pool */
        if (base_addr < (kernel_pool_base) ||
            (base_addr+current->size) >
            (kernel_pool_base)+NONPAGED_POOL_SIZE)
        {
            printk("Block %x found outside pool area\n",current);
            printk("Size %d\n",current->size);
            printk("Limits are %x %x\n",kernel_pool_base,
                   kernel_pool_base+NONPAGED_POOL_SIZE);
            KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }
        blocks_seen++;
        /* seeing more nodes than the counter records implies corruption */
        if (blocks_seen > nr_free_blocks)
        {
            printk("Too many blocks on list\n");
            KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }
        // verify_for_write(base_addr,current->size);
        /* the successor's back pointer must point here */
        if (current->next!=NULL&&current->next->previous!=current)
        {
            printk("%s:%d:Break in list (current %x next %x "
                   "current->next->previous %x)\n",
                   __FILE__,__LINE__,current,current->next,
                   current->next->previous);
            KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }
        current=current->next;
    }
}
105
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * NOTE(review): on corruption this spins forever (for(;;)) instead of
 * calling KeBugCheck as validate_free_list does — consider making the
 * two validators consistent.
 */
{
    block_hdr* current=used_list_head;
    unsigned int blocks_seen=0;

    while (current!=NULL)
    {
        unsigned int base_addr = (int)current;
        /* the block (header address + data size) must lie inside the pool */
        if (base_addr < (kernel_pool_base) ||
            (base_addr+current->size) >
            (kernel_pool_base)+NONPAGED_POOL_SIZE)
        {
            printk("Block %x found outside pool area\n",current);
            for(;;);
        }
        blocks_seen++;
        /* seeing more nodes than the counter records implies corruption */
        if (blocks_seen > nr_used_blocks)
        {
            printk("Too many blocks on list\n");
            for(;;);
        }
        // verify_for_write(base_addr,current->size);
        /* the successor's back pointer must point here */
        if (current->next!=NULL&&current->next->previous!=current)
        {
            printk("Break in list (current %x next %x)\n",
                   current,current->next);
            for(;;);
        }
        current=current->next;
    }

}
141
static void check_duplicates(block_hdr* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *       blk = block to check
 * NOTE: Hangs (spins forever) if any block on either list overlaps the
 * byte range [base, last) occupied by blk's header plus data.  blk
 * itself is skipped implicitly: the comparisons against base are strict,
 * so current == blk matches neither condition.  The stray second '+'
 * before sizeof below is a harmless unary plus.
 */
{
    unsigned int base = (int)blk;
    unsigned int last = ((int)blk) + +sizeof(block_hdr) + blk->size;

    block_hdr* current=free_list_head;
    while (current!=NULL)
    {
        /* another block's header starts inside blk's extent */
        if ( (int)current > base && (int)current < last )
        {
            printk("intersecting blocks on list\n");
            for(;;);
        }
        /* a block starting before blk extends into blk's extent */
        if  ( (int)current < base &&
              ((int)current + current->size + sizeof(block_hdr))
              > base )
        {
            printk("intersecting blocks on list\n");
            for(;;);
        }
        current=current->next;
    }
    current=used_list_head;
    while (current!=NULL)
    {
        /* same two overlap tests against the used list */
        if ( (int)current > base && (int)current < last )
        {
            printk("intersecting blocks on list\n");
            for(;;);
        }
        if  ( (int)current < base &&
              ((int)current + current->size + sizeof(block_hdr))
              > base )
        {
            printk("intersecting blocks on list\n");
            for(;;);
        }
        current=current->next;
    }

}
189
190 static void validate_kernel_pool(void)
191 /*
192 * FUNCTION: Checks the integrity of the kernel memory heap
193 */
194 {
195 block_hdr* current=NULL;
196
197 validate_free_list();
198 validate_used_list();
199
200 current=free_list_head;
201 while (current!=NULL)
202 {
203 check_duplicates(current);
204 current=current->next;
205 }
206 current=used_list_head;
207 while (current!=NULL)
208 {
209 check_duplicates(current);
210 current=current->next;
211 }
212 }
213
214 static void add_to_free_list(block_hdr* blk)
215 /*
216 * FUNCTION: add the block to the free list (internal)
217 */
218 {
219 blk->next=free_list_head;
220 blk->previous=NULL;
221 if (free_list_head!=NULL)
222 {
223 free_list_head->previous=blk;
224 }
225 free_list_head=blk;
226 nr_free_blocks++;
227 }
228
229 static void add_to_used_list(block_hdr* blk)
230 /*
231 * FUNCTION: add the block to the used list (internal)
232 */
233 {
234 blk->next=used_list_head;
235 blk->previous=NULL;
236 if (used_list_head!=NULL)
237 {
238 used_list_head->previous=blk;
239 }
240 used_list_head=blk;
241 nr_used_blocks++;
242 }
243
244
245 static void remove_from_free_list(block_hdr* current)
246 {
247 if (current->next==NULL&&current->previous==NULL)
248 {
249 free_list_head=NULL;
250 }
251 else
252 {
253 if (current->previous!=NULL)
254 {
255 current->previous->next=current->next;
256 }
257 if (current->next!=NULL)
258 {
259 DPRINT("current->previous %x\n",current->previous);
260 current->next->previous=current->previous;
261 }
262 }
263 nr_free_blocks--;
264 }
265
#ifdef BROKEN_VERSION_OF_REMOVE_FROM_FREE_LIST
/*
 * Dead code, compiled out.  Broken because it dereferences
 * current->previous (or current->next) without a NULL check: removing
 * the list head while it has a successor hits current->previous->next
 * with current->previous == NULL.  It also never updates free_list_head
 * when the head is removed from a multi-element list.  Kept for
 * reference only.
 */
static void remove_from_free_list(block_hdr* current)
{
    if (current->next==NULL&&current->previous==NULL)
    {
        free_list_head=NULL;
    }
    else
    {
        if (current->next==NULL)
        {
            current->previous->next=NULL;
        }
        else
        {
            current->previous->next=current->next;
        }
        if (current->previous==NULL)
        {
            current->next->previous=NULL;
        }
        else
        {
            current->next->previous=current->previous;
        }
    }
    nr_free_blocks--;
}
#endif
295
296 static void remove_from_used_list(block_hdr* current)
297 {
298 if (current->next==NULL&&current->previous==NULL)
299 {
300 used_list_head=NULL;
301 }
302 else
303 {
304 if (current->previous==NULL)
305 {
306 current->next->previous=NULL;
307 used_list_head=current->next;
308 }
309 else
310 {
311 current->previous->next=current->next;
312 }
313 if (current->next!=NULL)
314 {
315 current->next->previous=current->previous;
316 }
317 else
318 {
319 current->previous->next=NULL;
320 }
321 }
322 nr_used_blocks--;
323 }
324
325
326 inline static void* block_to_address(block_hdr* blk)
327 /*
328 * FUNCTION: Translate a block header address to the corresponding block
329 * address (internal)
330 */
331 {
332 return ( (void *) ((int)blk + sizeof(block_hdr)) );
333 }
334
335 inline static block_hdr* address_to_block(void* addr)
336 {
337 return (block_hdr *)
338 ( ((int)addr) - sizeof(block_hdr) );
339 }
340
static unsigned int alloc_pool_region(unsigned int nr_pages)
/*
 * FUNCTION: Allocates a region of pages within the nonpaged pool area
 * ARGUMENTS:
 *       nr_pages = number of contiguous pages required
 * RETURNS: The virtual base address of the reserved region
 * NOTE: First-fit scan of the allocation bitmap.  The scan starts at
 * bit 1, so page 0 of the pool is never handed out — presumably
 * reserved as a guard page; TODO confirm.  On exhaustion this prints
 * a message and spins forever rather than returning.
 */
{
    unsigned int start = 0;
    unsigned int length = 0;
    unsigned int i,j;

    DPRINT("alloc_pool_region(nr_pages = %d)\n",nr_pages);

    for (i=1; i<ALLOC_MAP_SIZE;i++)
    {
        if (!test_bit(i%32,&alloc_map[i/32]))
        {
            /* extend (or begin) the current run of free pages */
            if (length == 0)
            {
                start=i;
                length = 1;
            }
            else
            {
                length++;
            }
            if (length==nr_pages)
            {
                /* found a large enough run: mark every page used */
                DPRINT("found region at %d for %d\n",start,
                       length);
                for (j=start;j<(start+length);j++)
                {
                    DPRINT("Writing %x\n",&alloc_map[j/32]);
                    set_bit(j%32,&alloc_map[j/32]);
                }
                DPRINT("returning %x\n",(start*PAGESIZE)
                       +kernel_pool_base);
                return((start*PAGESIZE)+kernel_pool_base);
            }
        }
        else
        {
            /* run interrupted by an allocated page: restart the search */
            start=0;
            length=0;
        }
    }
    printk("CRITICAL: Out of kmalloc space\n");
    for(;;);
    return(0);
}
389
390 static block_hdr* grow_kernel_pool(unsigned int size)
391 /*
392 * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
393 * bytes
394 */
395 {
396 unsigned int total_size = size + sizeof(block_hdr);
397 unsigned int nr_pages = PAGE_ROUND_UP(total_size) / PAGESIZE;
398 unsigned int start = alloc_pool_region(nr_pages);
399 block_hdr* used_blk=NULL;
400 block_hdr* free_blk=NULL;
401 int i;
402
403 DPRINT("growing heap for block size %d, ",size);
404 DPRINT("start %x\n",start);
405
406 for (i=0;i<nr_pages;i++)
407 {
408 set_page(start+(i*PAGESIZE),PA_SYSTEM | PA_WRITE | PA_READ,
409 get_free_page());
410 }
411
412
413 if ((PAGESIZE-(total_size%PAGESIZE))>(2*sizeof(block_hdr)))
414 {
415 used_blk = (struct _block_hdr *)start;
416 DPRINT("Creating block at %x\n",start);
417 used_blk->size = size;
418 add_to_used_list(used_blk);
419
420 free_blk = (block_hdr *)(start + sizeof(block_hdr) + size);
421 DPRINT("Creating block at %x\n",free_blk);
422 free_blk->size = (nr_pages * PAGESIZE) -((sizeof(block_hdr)*2) + size);
423 add_to_free_list(free_blk);
424 }
425 else
426 {
427 used_blk = (struct _block_hdr *)start;
428 used_blk->size = nr_pages * PAGESIZE;
429 add_to_used_list(used_blk);
430 }
431
432 validate_kernel_pool();
433 return(used_blk);
434 }
435
static void* take_block(block_hdr* current, unsigned int size)
/*
 * FUNCTION: Allocate a used block of least 'size' from the specified
 * free block
 * ARGUMENTS:
 *       current = a block on the free list with current->size >= size
 *       size    = size in bytes of the data area required
 * RETURNS: The address of the created memory block
 */
{
    /*
     * If the block is much bigger than required then split it and
     * return a pointer to the allocated section. If the difference
     * between the sizes is marginal it makes no sense to have the
     * extra overhead
     */
    if (current->size > (1 + size + sizeof(block_hdr)))
    {
        /*
         * Replace the bigger block with a smaller block in the
         * same position in the list
         */

        /* the remainder's header starts right after the carved-out data */
        block_hdr* free_blk = (block_hdr *)(((int)current)
                                            + sizeof(block_hdr) + size);
        /* splice free_blk into current's position on the free list;
           nr_free_blocks is unchanged (one block replaced by one) */
        free_blk->next = current->next;
        free_blk->previous = current->previous;
        if (current->next)
        {
            current->next->previous = free_blk;
        }
        if (current->previous)
        {
            current->previous->next = free_blk;
        }
        /* remainder = old data area minus the carved data and new header */
        free_blk->size = current->size - (sizeof(block_hdr) + size);
        if (current==free_list_head)
        {
            free_list_head=free_blk;
        }

        current->size=size;
        add_to_used_list(current);

        validate_kernel_pool();
        return(block_to_address(current));
    }

    /*
     * Otherwise allocate the whole block
     */
    remove_from_free_list(current);
    add_to_used_list(current);

    validate_kernel_pool();
    return(block_to_address(current));
}
489
490 asmlinkage VOID ExFreePool(PVOID block)
491 /*
492 * FUNCTION: Releases previously allocated memory
493 * ARGUMENTS:
494 * block = block to free
495 */
496 {
497 block_hdr* blk=address_to_block(block);
498 DPRINT("(%s:%d) freeing block %x\n",__FILE__,__LINE__,blk);
499
500 validate_kernel_pool();
501 /*
502 * Please don't change the order
503 */
504 remove_from_used_list(blk);
505 add_to_free_list(blk);
506
507 validate_kernel_pool();
508 }
509
510 #define CACHE_ALIGNMENT (16)
511
512 PVOID ExAllocatePool(ULONG type, ULONG size)
513 /*
514 * FUNCTION: Allocates memory from the pool
515 * ARGUMENTS:
516 * size = minimum size of the block to be allocated
517 * type = the type of memory to use for the block
518 * RETURNS:
519 * the address of the block if it succeeds
520 */
521 {
522 PVOID Block;
523
524 if (type == NonPagedPoolCacheAligned ||
525 type == NonPagedPoolCacheAlignedMustS)
526 {
527 size = size + CACHE_ALIGNMENT;
528 }
529
530 switch(type)
531 {
532 case NonPagedPool:
533 case NonPagedPoolMustSucceed:
534 case NonPagedPoolCacheAligned:
535 case NonPagedPoolCacheAlignedMustS:
536 Block = ExAllocateNonPagedPool(type,size);
537 break;
538
539 case PagedPool:
540 case PagedPoolCacheAligned:
541 Block = ExAllocatePagedPool(type,size);
542 break;
543
544 default:
545 return(NULL);
546 };
547
548 if ((type==NonPagedPoolMustSucceed || type==NonPagedPoolCacheAlignedMustS)
549 && Block==NULL)
550 {
551 KeBugCheck(MUST_SUCCEED_POOL_EMPTY);
552 }
553 if (type == NonPagedPoolCacheAligned ||
554 type == NonPagedPoolCacheAlignedMustS)
555 {
556 Block = Block + CACHE_ALIGNMENT - (((int)Block)%CACHE_ALIGNMENT);
557 }
558 return(Block);
559 }
560
static PVOID ExAllocatePagedPool(ULONG type, ULONG size)
/*
 * FUNCTION: Allocates memory from the paged pool — not yet implemented;
 * the UNIMPLEMENTED macro is the only statement, so this never returns
 * a valid block.
 */
{
    UNIMPLEMENTED;
}
565
566 static PVOID ExAllocateNonPagedPool(ULONG type, ULONG size)
567 {
568 block_hdr* current=NULL;
569
570 DPRINT("kmalloc(size %d)\n",size);
571 validate_kernel_pool();
572
573 /*
574 * accomodate this useful idiom
575 */
576 if (size==0)
577 {
578 return(NULL);
579 }
580
581 /*
582 * Look for an already created block of sufficent size
583 */
584 current=free_list_head;
585
586 while (current!=NULL)
587 {
588 DPRINT("current %x size %x next %x\n",current,current->size,
589 current->next);
590 if (current->size>=size)
591 {
592 DPRINT("found block %x of size %d\n",current,size);
593 return(take_block(current,size));
594 }
595 current=current->next;
596 }
597
598 /*
599 * Otherwise create a new block
600 */
601 return(block_to_address(grow_kernel_pool(size)));
602 }
603
PVOID ExAllocatePoolWithQuota(POOL_TYPE PoolType, ULONG NumberOfBytes)
/*
 * FUNCTION: Allocates pool memory, charging the current thread's quota
 * NOTE: The quota accounting is commented out below, so this currently
 * behaves exactly like ExAllocatePool; 'current' is unused apart from
 * the disabled code that references it.
 */
{
    PVOID Block;
    PKTHREAD current = KeGetCurrentThread();

    Block = ExAllocatePool(PoolType,NumberOfBytes);
    switch(PoolType)
    {
        case NonPagedPool:
        case NonPagedPoolMustSucceed:
        case NonPagedPoolCacheAligned:
        case NonPagedPoolCacheAlignedMustS:
            //   current->NPagedPoolQuota = current->NPagedPoolQuota - NumberOfBytes;
            break;

        case PagedPool:
        case PagedPoolCacheAligned:
            //   current->PagedPoolQuota = current->PagedPoolQuota - NumberOfBytes;
            break;
    };
    return(Block);
}
626
627 PVOID ExAllocatePoolWithQuotaTag(POOL_TYPE PoolType, ULONG NumberOfBytes,
628 ULONG Tag)
629 {
630 PVOID Block;
631 Block=ExAllocatePoolWithQuota(PoolType,NumberOfBytes+sizeof(ULONG));
632 ((ULONG *)Block)[0]=Tag;
633 return(Block+4);
634 }
635
636 PVOID ExAllocatePoolWithTag(POOL_TYPE PoolType, ULONG NumberOfBytes,
637 ULONG Tag)
638 /*
639 * FUNCTION: Allocates pool memory and inserts a caller supplied tag before
640 * the block allocated
641 * ARGUMENTS:
642 * PoolType = Type of memory to allocate
643 * NumberOfBytes = Number of bytes to allocate
644 * Tag = Tag
645 * RETURNS: The address of the block allocated
646 */
647 {
648 PVOID Block;
649 Block=ExAllocatePool(PoolType,NumberOfBytes+sizeof(ULONG));
650 ((ULONG *)Block)[0]=Tag;
651 return(Block+4);
652 }