Merge adjacent free blocks in the non-paged pool
/* $Id: npool.c,v 1.42 2001/03/14 23:19:14 dwelch Exp $
 *
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/npool.c
 * PURPOSE:     Implements the kernel memory pool
 * PROGRAMMER:  David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *              27/05/98: Created
 *              10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
 *                        in take_block (if current bigger than required)
 *                        in remove_from_used_list
 *                        in ExFreePool
 *              23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
 */

/* INCLUDES ****************************************************************/

#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/mmhal.h>
#include <internal/bitops.h>
#include <internal/ntoskrnl.h>
#include <internal/pool.h>

#define NDEBUG
#include <internal/debug.h>

/* Enable strict checking of the nonpaged pool on every allocation */
/* #define ENABLE_VALIDATE_POOL */

/* Enable tracking of statistics about the tagged blocks in the pool */
#define TAG_STATISTICS_TRACKING

/*
 * Put each block in its own range of pages and position the block at the
 * end of the range so any accesses beyond the end of the block are to
 * invalid memory locations.
 * FIXME: Not implemented yet.
 */
/* #define WHOLE_PAGE_ALLOCATIONS */

#ifdef ENABLE_VALIDATE_POOL
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
/* No trailing semicolon: the do-while(0) wrapper lets the macro be used
   safely as a single statement, e.g. in an unbraced if/else */
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0)
#else
#define POOL_TRACE(args...)
#endif

/* TYPES *******************************************************************/

#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)

/*
 * Fields present at the start of a block (this is for internal use only)
 */
typedef struct _BLOCK_HDR
{
  ULONG Magic;
  ULONG Size;
  LIST_ENTRY ListEntry;
  ULONG Tag;
  PVOID Caller;
  struct _BLOCK_HDR* tag_next;
  BOOLEAN Dumped;
} BLOCK_HDR;

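/*
 * Layout sketch (derived from block_to_address/address_to_block below):
 * every allocation is preceded in memory by its BLOCK_HDR, and the
 * address handed to callers points just past the header.
 *
 *   |<-- sizeof(BLOCK_HDR) -->|<-------- Size bytes -------->|
 *   +-------------------------+------------------------------+
 *   | Magic Size ListEntry... | caller-visible data          |
 *   +-------------------------+------------------------------+
 *   ^ BLOCK_HDR* blk          ^ PVOID block
 */
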
/* GLOBALS *****************************************************************/

/*
 * Base address of the pool, set at memory management initialization
 */
static unsigned int kernel_pool_base = 0;

/*
 * Head of the list of free blocks
 */
static LIST_ENTRY FreeBlockListHead;

/*
 * Head of the list of in-use blocks
 */
static LIST_ENTRY UsedBlockListHead;

/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;

/*
 * Lock that protects the non-paged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/*
 * Allocate a range of memory in the nonpaged pool
 */
PVOID
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */

/* FUNCTIONS ***************************************************************/

#ifdef TAG_STATISTICS_TRACKING
VOID
MiRemoveFromTagHashTable(BLOCK_HDR* block)
/*
 * FUNCTION: Remove a block from the tag hash table
 */
{
  BLOCK_HDR* previous;
  BLOCK_HDR* current;
  ULONG hash;

  if (block->Tag == 0)
    {
      return;
    }

  hash = block->Tag % TAG_HASH_TABLE_SIZE;

  previous = NULL;
  current = tag_hash_table[hash];
  while (current != NULL)
    {
      if (current == block)
        {
          if (previous == NULL)
            {
              tag_hash_table[hash] = block->tag_next;
            }
          else
            {
              previous->tag_next = block->tag_next;
            }
          return;
        }
      previous = current;
      current = current->tag_next;
    }
  DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
          block->Tag, block->Caller);
  KeBugCheck(0);
}

VOID
MiAddToTagHashTable(BLOCK_HDR* block)
/*
 * FUNCTION: Add a block to the tag hash table, keeping blocks that share
 * a tag adjacent in the chain
 */
{
  ULONG hash;
  BLOCK_HDR* current;
  BLOCK_HDR* previous;

  if (block->Tag == 0)
    {
      return;
    }

  hash = block->Tag % TAG_HASH_TABLE_SIZE;

  previous = NULL;
  current = tag_hash_table[hash];
  while (current != NULL)
    {
      if (current->Tag == block->Tag)
        {
          block->tag_next = current->tag_next;
          current->tag_next = block;
          return;
        }
      previous = current;
      current = current->tag_next;
    }
  block->tag_next = NULL;
  if (previous == NULL)
    {
      tag_hash_table[hash] = block;
    }
  else
    {
      previous->tag_next = block;
    }
}
#endif /* TAG_STATISTICS_TRACKING */

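/*
 * Illustrative example (tag value hypothetical): pool tags are ULONGs
 * built from four ASCII characters stored least-significant byte first,
 * which is why the dump routines below print the bytes in c4..c1 order.
 * A tag written as 'Lfs ' is the ULONG 0x2073664c; since
 * TAG_HASH_TABLE_SIZE is 1024 (a power of two), its chain index is just
 * the low ten bits: 0x2073664c % 1024 == 0x24c == 588.
 */
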
VOID
ExInitNonPagedPool(ULONG BaseAddress)
{
  kernel_pool_base = BaseAddress;
  KeInitializeSpinLock(&MmNpoolLock);
  MmInitKernelMap((PVOID)BaseAddress);
#ifdef TAG_STATISTICS_TRACKING
  memset(tag_hash_table, 0, sizeof(tag_hash_table));
#endif /* TAG_STATISTICS_TRACKING */
  InitializeListHead(&FreeBlockListHead);
  InitializeListHead(&UsedBlockListHead);
}

#ifdef TAG_STATISTICS_TRACKING
VOID STATIC
MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
{
  CHAR c1, c2, c3, c4;

  c1 = (CurrentTag >> 24) & 0xFF;
  c2 = (CurrentTag >> 16) & 0xFF;
  c3 = (CurrentTag >> 8) & 0xFF;
  c4 = CurrentTag & 0xFF;

  if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
    {
      DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
               CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
               CurrentSize, CurrentSize / CurrentNrBlocks);
    }
  else
    {
      DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
               CurrentTag, CurrentNrBlocks, CurrentSize,
               CurrentSize / CurrentNrBlocks);
    }
}
#endif /* TAG_STATISTICS_TRACKING */

VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#ifdef TAG_STATISTICS_TRACKING
  ULONG i;
  BLOCK_HDR* current;
  ULONG CurrentTag;
  ULONG CurrentNrBlocks;
  ULONG CurrentSize;
  ULONG TotalBlocks;
  ULONG TotalSize;

  DbgPrint("******* Dumping nonpaged pool stats ******\n");
  TotalBlocks = 0;
  TotalSize = 0;
  for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
    {
      CurrentTag = 0;
      CurrentNrBlocks = 0;
      CurrentSize = 0;
      current = tag_hash_table[i];
      while (current != NULL)
        {
          if (current->Tag != CurrentTag)
            {
              if (CurrentTag != 0 && CurrentNrBlocks != 0)
                {
                  MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
                }
              CurrentTag = current->Tag;
              CurrentNrBlocks = 0;
              CurrentSize = 0;
            }

          if (!NewOnly || !current->Dumped)
            {
              CurrentNrBlocks++;
              TotalBlocks++;
              CurrentSize = CurrentSize + current->Size;
              TotalSize = TotalSize + current->Size;
              current->Dumped = TRUE;
            }
          current = current->tag_next;
        }
      if (CurrentTag != 0 && CurrentNrBlocks != 0)
        {
          MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
        }
    }
  if (TotalBlocks != 0)
    {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
    }
  else
    {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
    }
  DbgPrint("***************** Dump Complete ***************\n");
#endif /* TAG_STATISTICS_TRACKING */
}

VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;
  KIRQL oldIrql;

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  DbgPrint("******* Dumping nonpaged pool contents ******\n");
  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      if (!NewOnly || !current->Dumped)
        {
          CHAR c1, c2, c3, c4;

          c1 = (current->Tag >> 24) & 0xFF;
          c2 = (current->Tag >> 16) & 0xFF;
          c3 = (current->Tag >> 8) & 0xFF;
          c4 = current->Tag & 0xFF;

          if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
            {
              DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                       current->Size, current->Tag, c4, c3, c2, c1,
                       current->Caller);
            }
          else
            {
              DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                       current->Size, current->Tag, current->Caller);
            }
          current->Dumped = TRUE;
        }
      current_entry = current_entry->Flink;
    }
  DbgPrint("***************** Dump Complete ***************\n");
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}

#ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 */
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;
  unsigned int blocks_seen = 0;

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      unsigned int base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      base_addr = (int)current;

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
        {
          DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                   current);
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }

      if (base_addr < (kernel_pool_base) ||
          (base_addr + current->Size) > (kernel_pool_base) + NONPAGED_POOL_SIZE)
        {
          DbgPrint("Block %x found outside pool area\n", current);
          DbgPrint("Size %d\n", current->Size);
          DbgPrint("Limits are %x %x\n", kernel_pool_base,
                   kernel_pool_base + NONPAGED_POOL_SIZE);
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }
      blocks_seen++;
      if (blocks_seen > EiNrFreeBlocks)
        {
          DbgPrint("Too many blocks on free list\n");
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }
      if (current->ListEntry.Flink != &FreeBlockListHead &&
          current->ListEntry.Flink->Blink != &current->ListEntry)
        {
          DbgPrint("%s:%d:Break in list (current %x next %x "
                   "current->next->previous %x)\n",
                   __FILE__, __LINE__, current, current->ListEntry.Flink,
                   current->ListEntry.Flink->Blink);
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }

      current_entry = current_entry->Flink;
    }
}

static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 */
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;
  unsigned int blocks_seen = 0;

  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      unsigned int base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      base_addr = (int)current;

      if (current->Magic != BLOCK_HDR_USED_MAGIC)
        {
          DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                   current);
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }
      if (base_addr < (kernel_pool_base) ||
          (base_addr + current->Size) >
          (kernel_pool_base) + NONPAGED_POOL_SIZE)
        {
          DbgPrint("Block %x found outside pool area\n", current);
          for(;;);
        }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
        {
          DbgPrint("Too many blocks on used list\n");
          for(;;);
        }
      if (current->ListEntry.Flink != &UsedBlockListHead &&
          current->ListEntry.Flink->Blink != &current->ListEntry)
        {
          DbgPrint("Break in list (current %x next %x)\n",
                   current, current->ListEntry.Flink);
          for(;;);
        }

      current_entry = current_entry->Flink;
    }
}

static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check that a block does not overlap any other block
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if overlapping blocks are found
 */
{
  unsigned int base = (int)blk;
  unsigned int last = ((int)blk) + sizeof(BLOCK_HDR) + blk->Size;
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
        {
          DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                   current);
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }

      if ((int)current > base && (int)current < last)
        {
          DbgPrint("intersecting blocks on list\n");
          for(;;);
        }
      if ((int)current < base &&
          ((int)current + current->Size + sizeof(BLOCK_HDR)) > base)
        {
          DbgPrint("intersecting blocks on list\n");
          for(;;);
        }

      current_entry = current_entry->Flink;
    }

  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if ((int)current > base && (int)current < last)
        {
          DbgPrint("intersecting blocks on list\n");
          for(;;);
        }
      if ((int)current < base &&
          ((int)current + current->Size + sizeof(BLOCK_HDR)) > base)
        {
          DbgPrint("intersecting blocks on list\n");
          for(;;);
        }

      current_entry = current_entry->Flink;
    }
}

static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 */
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;

  validate_free_list();
  validate_used_list();

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      check_duplicates(current);
      current_entry = current_entry->Flink;
    }
  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      check_duplicates(current);
      current_entry = current_entry->Flink;
    }
}
#endif

#if 0
STATIC VOID
free_pages(BLOCK_HDR* blk)
{
  ULONG start;
  ULONG end;
  ULONG i;

  start = (ULONG)blk;
  end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

  /*
   * If the block doesn't contain a whole page then there is nothing to do
   */
  if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
    {
      return;
    }
}
#endif

STATIC VOID
merge_free_block(BLOCK_HDR* blk)
/*
 * FUNCTION: Merge a free block with any physically adjacent free
 * neighbours (internal). Relies on the free list being kept sorted by
 * address, so only the list neighbours need to be checked.
 */
{
  PLIST_ENTRY next_entry;
  BLOCK_HDR* next;
  PLIST_ENTRY previous_entry;
  BLOCK_HDR* previous;

  next_entry = blk->ListEntry.Flink;
  if (next_entry != &FreeBlockListHead)
    {
      next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
      if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
          (unsigned int)next)
        {
          RemoveEntryList(&next->ListEntry);
          blk->Size = blk->Size + sizeof(BLOCK_HDR) + next->Size;
          EiNrFreeBlocks--;
        }
    }

  previous_entry = blk->ListEntry.Blink;
  if (previous_entry != &FreeBlockListHead)
    {
      previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
      if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
          (unsigned int)blk)
        {
          RemoveEntryList(&blk->ListEntry);
          previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
          EiNrFreeBlocks--;
        }
    }
}

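/*
 * Sketch of the forward merge above (sizes illustrative): if blk ends
 * exactly where its list successor begins, the successor's header and
 * data are absorbed into blk.
 *
 *   before:  | HDR | blk->Size | HDR | next->Size |
 *   after:   | HDR | blk->Size + sizeof(BLOCK_HDR) + next->Size |
 *
 * The backward case is symmetric, with blk absorbed into previous.
 */
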
STATIC VOID
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: Add the block to the free list, keeping the list sorted by
 * ascending block address (internal)
 */
{
  PLIST_ENTRY current_entry;
  BLOCK_HDR* current;

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if ((unsigned int)current > (unsigned int)blk)
        {
          blk->ListEntry.Flink = current_entry;
          blk->ListEntry.Blink = current_entry->Blink;
          current_entry->Blink->Flink = &blk->ListEntry;
          current_entry->Blink = &blk->ListEntry;
          EiNrFreeBlocks++;
          return;
        }

      current_entry = current_entry->Flink;
    }
  InsertTailList(&FreeBlockListHead, &blk->ListEntry);
  EiNrFreeBlocks++;
}

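/*
 * Ordering example (addresses hypothetical): with free blocks at
 * 0xd0001000 and 0xd0003000, freeing a block at 0xd0002000 inserts it
 * between the two, so merge_free_block() can test physical adjacency by
 * looking only at ListEntry.Flink and ListEntry.Blink.
 */
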
static void add_to_used_list(BLOCK_HDR* blk)
/*
 * FUNCTION: Add the block to the used list (internal)
 */
{
  InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
  EiNrUsedBlocks++;
}


static void remove_from_free_list(BLOCK_HDR* current)
{
  RemoveEntryList(&current->ListEntry);
  EiNrFreeBlocks--;
}


static void remove_from_used_list(BLOCK_HDR* current)
{
  RemoveEntryList(&current->ListEntry);
  EiNrUsedBlocks--;
}


inline static void* block_to_address(BLOCK_HDR* blk)
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
  return ((void *)((int)blk + sizeof(BLOCK_HDR)));
}

inline static BLOCK_HDR* address_to_block(void* addr)
/*
 * FUNCTION: Translate a block address to the corresponding block header
 * address (internal)
 */
{
  return (BLOCK_HDR *)(((int)addr) - sizeof(BLOCK_HDR));
}

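/*
 * Worked example (addresses and header size hypothetical): if a
 * BLOCK_HDR occupies 32 bytes and sits at 0xd0000000, then
 * block_to_address(blk) yields 0xd0000020, and a later
 * address_to_block((void*)0xd0000020) recovers the header at 0xd0000000.
 */
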
static BLOCK_HDR* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Grow the executive heap to accommodate a block of at least
 * 'size' bytes
 */
{
  unsigned int total_size = size + sizeof(BLOCK_HDR);
  unsigned int nr_pages = PAGE_ROUND_UP(total_size) / PAGESIZE;
  unsigned int start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);
  BLOCK_HDR* used_blk = NULL;
  BLOCK_HDR* free_blk = NULL;
  unsigned int i;
  NTSTATUS Status;

  OLD_DPRINT("growing heap for block size %d, ", size);
  OLD_DPRINT("start %x\n", start);

  for (i = 0; i < nr_pages; i++)
    {
      Status = MmCreateVirtualMapping(NULL,
                                      (PVOID)(start + (i * PAGESIZE)),
                                      PAGE_READWRITE,
                                      (ULONG)MmAllocPage(0));
      if (!NT_SUCCESS(Status))
        {
          DbgPrint("Unable to create virtual mapping\n");
          KeBugCheck(0);
        }
    }

  /*
   * If the residue of the last page is large enough for another block,
   * split the new region into a used block and a free block; otherwise
   * give the whole region to the used block. (Computing the residue as
   * nr_pages * PAGESIZE - total_size also handles the case where
   * total_size is an exact multiple of PAGESIZE.)
   */
  if (((nr_pages * PAGESIZE) - total_size) > (2 * sizeof(BLOCK_HDR)))
    {
      used_blk = (struct _BLOCK_HDR *)start;
      OLD_DPRINT("Creating block at %x\n", start);
      used_blk->Magic = BLOCK_HDR_USED_MAGIC;
      used_blk->Size = size;
      add_to_used_list(used_blk);

      free_blk = (BLOCK_HDR *)(start + sizeof(BLOCK_HDR) + size);
      OLD_DPRINT("Creating block at %x\n", free_blk);
      free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
      free_blk->Size = (nr_pages * PAGESIZE) - ((sizeof(BLOCK_HDR) * 2) + size);
      add_to_free_list(free_blk);

      EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;
      EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;
    }
  else
    {
      used_blk = (struct _BLOCK_HDR *)start;
      used_blk->Magic = BLOCK_HDR_USED_MAGIC;
      used_blk->Size = (nr_pages * PAGESIZE) - sizeof(BLOCK_HDR);
      add_to_used_list(used_blk);

      EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;
    }

  used_blk->Tag = Tag;
  used_blk->Caller = Caller;
  used_blk->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
  MiAddToTagHashTable(used_blk);
#endif /* TAG_STATISTICS_TRACKING */

  VALIDATE_POOL;
  return(used_blk);
}

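/*
 * Sizing example for grow_kernel_pool (assumes PAGESIZE == 4096 and a
 * 32-byte BLOCK_HDR; both values are illustrative): a request for 100
 * bytes gives total_size == 132 and nr_pages == 1, and the 4096-byte
 * region is split into a used block of 100 bytes and a free block of
 * 4096 - 2*32 - 100 == 3932 bytes.
 */
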
static void* take_block(BLOCK_HDR* current, unsigned int size,
                        ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocate a used block of at least 'size' bytes from the
 * specified free block
 * RETURNS: The address of the created memory block
 */
{
  /*
   * If the block is much bigger than required then split it and
   * return a pointer to the allocated section. If the difference
   * between the sizes is marginal it makes no sense to have the
   * extra overhead.
   */
  if (current->Size > (1 + size + sizeof(BLOCK_HDR)))
    {
      BLOCK_HDR* free_blk;

      EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;

      /*
       * Replace the bigger block with a smaller block in the
       * same position in the list
       */
      free_blk = (BLOCK_HDR *)(((int)current) + sizeof(BLOCK_HDR) + size);
      free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
      InsertHeadList(&current->ListEntry, &free_blk->ListEntry);
      free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);

      current->Size = size;
      RemoveEntryList(&current->ListEntry);
      InsertHeadList(&UsedBlockListHead, &current->ListEntry);
      EiNrUsedBlocks++;
      current->Magic = BLOCK_HDR_USED_MAGIC;
      current->Tag = Tag;
      current->Caller = Caller;
      current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
      MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

      EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;
      EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;

      VALIDATE_POOL;
      return(block_to_address(current));
    }

  /*
   * Otherwise allocate the whole block
   */
  remove_from_free_list(current);
  add_to_used_list(current);

  EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;
  EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;

  current->Magic = BLOCK_HDR_USED_MAGIC;
  current->Tag = Tag;
  current->Caller = Caller;
  current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
  MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

  VALIDATE_POOL;
  return(block_to_address(current));
}

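/*
 * Split sketch for take_block (sizes illustrative): carving 'size' bytes
 * out of a larger free block leaves the remainder, with its own header,
 * in the free block's old list position.
 *
 *   before:  | HDR(free) |          current->Size bytes          |
 *   after:   | HDR(used) | size bytes | HDR(free) | remainder bytes |
 *
 * where remainder == current->Size - size - sizeof(BLOCK_HDR).
 */
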
VOID STDCALL ExFreePool (PVOID block)
/*
 * FUNCTION: Releases previously allocated memory
 * ARGUMENTS:
 *         block = block to free
 */
{
  BLOCK_HDR* blk = address_to_block(block);
  KIRQL oldIrql;

  OLD_DPRINT("(%s:%d) freeing block %x\n", __FILE__, __LINE__, blk);

  POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n", block, blk->Size,
             ((PULONG)&block)[-1]);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  VALIDATE_POOL;

  if (blk->Magic != BLOCK_HDR_USED_MAGIC)
    {
      if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
        {
          DbgPrint("ExFreePool of already freed address %x\n", block);
        }
      else
        {
          DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
                   block, blk->Magic);
        }
      KeBugCheck(0);
      return;
    }

  /* Fill the freed block to catch use-after-free bugs */
  memset(block, 0xcc, blk->Size);

  /*
   * Update the accounting before merging, since merge_free_block() can
   * grow blk->Size by absorbing a neighbouring free block.
   */
  EiUsedNonPagedPool = EiUsedNonPagedPool - blk->Size;
  EiFreeNonPagedPool = EiFreeNonPagedPool + blk->Size;

#ifdef TAG_STATISTICS_TRACKING
  MiRemoveFromTagHashTable(blk);
#endif /* TAG_STATISTICS_TRACKING */
  remove_from_used_list(blk);
  blk->Magic = BLOCK_HDR_FREE_MAGIC;
  add_to_free_list(blk);
  merge_free_block(blk);

  VALIDATE_POOL;

  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}

PVOID STDCALL
ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
{
  BLOCK_HDR* current = NULL;
  PLIST_ENTRY current_entry;
  PVOID block;
  BLOCK_HDR* best = NULL;
  KIRQL oldIrql;

  POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
             Size, Caller);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  VALIDATE_POOL;

  /*
   * accommodate this useful idiom
   */
  if (Size == 0)
    {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
    }

  /*
   * Look for an already created block of sufficient size (best fit)
   */
  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      OLD_DPRINT("current %x size %x next %x\n", current, current->Size,
                 current_entry->Flink);
      if (current->Size >= Size &&
          (best == NULL || current->Size < best->Size))
        {
          best = current;
        }
      current_entry = current_entry->Flink;
    }
  if (best != NULL)
    {
      block = take_block(best, Size, Tag, Caller);
      VALIDATE_POOL;
      memset(block, 0, Size);
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(block);
    }

  /*
   * Otherwise create a new block
   */
  block = block_to_address(grow_kernel_pool(Size, Tag, Caller));
  VALIDATE_POOL;
  memset(block, 0, Size);
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
  return(block);
}

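/*
 * Usage sketch (illustrative only; the tag and sizes are hypothetical):
 * a caller allocates a tagged block and returns it with ExFreePool.
 *
 *   PVOID Buffer;
 *
 *   Buffer = ExAllocateNonPagedPoolWithTag(NonPagedPool, 237,
 *                                          0x20626344, // 'Dcb '
 *                                          NULL);
 *   //...use the 237 zero-initialized bytes...
 *   ExFreePool(Buffer);
 *
 * On first use the request grows the pool by one page via
 * grow_kernel_pool(), and the remainder of that page is left on the
 * free list for later allocations.
 */
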

/* EOF */