/* $Id: npool.c,v 1.48 2001/10/29 02:39:38 dwelch Exp $
 *
 * COPYRIGHT:    See COPYING in the top level directory
 * PROJECT:      ReactOS kernel
 * FILE:         ntoskrnl/mm/npool.c
 * PURPOSE:      Implements the kernel memory pool
 * PROGRAMMER:   David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *               27/05/98: Created
 *               10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
 *                         in take_block (if current bigger than required)
 *                         in remove_from_used_list
 *                         in ExFreePool
 *               23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
 */

/* INCLUDES ****************************************************************/

#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/bitops.h>
#include <internal/ntoskrnl.h>
#include <internal/pool.h>

#define NDEBUG
#include <internal/debug.h>

/* Enable strict checking of the nonpaged pool on every allocation */
//#define ENABLE_VALIDATE_POOL

/* Enable tracking of statistics about the tagged blocks in the pool */
#define TAG_STATISTICS_TRACKING

/*
 * Put each block in its own range of pages and position the block at the
 * end of the range so any accesses beyond the end of the block are to
 * invalid memory locations.
 * FIXME: Not implemented yet.
 */
/* #define WHOLE_PAGE_ALLOCATIONS */

#ifdef ENABLE_VALIDATE_POOL
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0)
#else
#define POOL_TRACE(args...)
#endif

/* TYPES *******************************************************************/

#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)

/*
 * fields present at the start of a block (this is for internal use only)
 */
typedef struct _BLOCK_HDR
{
   ULONG Magic;
   ULONG Size;
   LIST_ENTRY ListEntry;
   ULONG Tag;
   PVOID Caller;
   struct _BLOCK_HDR* tag_next;
   BOOLEAN Dumped;
} BLOCK_HDR;

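/*
 * Each pool block consists of a BLOCK_HDR immediately followed by the
 * caller-visible data area of 'Size' bytes (see block_to_address and
 * address_to_block below):
 *
 *   |<-- sizeof(BLOCK_HDR) -->|<--------- Size bytes --------->|
 *   +-------------------------+--------------------------------+
 *   | Magic | Size | ...      | data returned to the caller    |
 *   +-------------------------+--------------------------------+
 *   ^ blk                     ^ block_to_address(blk)
 */
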
/* GLOBALS *****************************************************************/

/*
 * Base address of the nonpaged pool, set at initialization
 */
static unsigned int kernel_pool_base = 0;

/*
 * Head of the list of free blocks
 */
static LIST_ENTRY FreeBlockListHead;

/*
 * Head of the list of in-use blocks
 */
static LIST_ENTRY UsedBlockListHead;

/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;

/*
 * Lock that protects the nonpaged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/*
 * Allocate a range of memory in the nonpaged pool
 */
PVOID
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */
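
/*
 * Used blocks are also chained into tag_hash_table, indexed by
 * Tag % TAG_HASH_TABLE_SIZE and linked through BLOCK_HDR.tag_next;
 * blocks sharing a tag are kept adjacent in their chain so the
 * statistics dump can group them in a single pass.
 */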

/* FUNCTIONS ***************************************************************/

#ifdef TAG_STATISTICS_TRACKING
VOID
MiRemoveFromTagHashTable(BLOCK_HDR* block)
/*
 * Remove a block from the tag hash table
 */
{
   BLOCK_HDR* previous;
   BLOCK_HDR* current;
   ULONG hash;

   if (block->Tag == 0)
     {
        return;
     }

   hash = block->Tag % TAG_HASH_TABLE_SIZE;

   previous = NULL;
   current = tag_hash_table[hash];
   while (current != NULL)
     {
        if (current == block)
          {
             if (previous == NULL)
               {
                  tag_hash_table[hash] = block->tag_next;
               }
             else
               {
                  previous->tag_next = block->tag_next;
               }
             return;
          }
        previous = current;
        current = current->tag_next;
     }
   DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
           block->Tag, block->Caller);
   KeBugCheck(0);
}

VOID
MiAddToTagHashTable(BLOCK_HDR* block)
/*
 * Add a block to the tag hash table
 */
{
   ULONG hash;
   BLOCK_HDR* current;
   BLOCK_HDR* previous;

   if (block->Tag == 0)
     {
        return;
     }

   hash = block->Tag % TAG_HASH_TABLE_SIZE;

   previous = NULL;
   current = tag_hash_table[hash];
   while (current != NULL)
     {
        if (current->Tag == block->Tag)
          {
             /* Keep blocks with the same tag adjacent in the chain */
             block->tag_next = current->tag_next;
             current->tag_next = block;
             return;
          }
        previous = current;
        /* Leftover debugging check for a corrupted chain link; the
           address below is a hardcoded value observed while debugging */
        if ((PVOID)current->tag_next >= (PVOID)0xc1123160)
          {
             DbgPrint("previous %x\n", previous);
          }
        current = current->tag_next;
     }
   block->tag_next = NULL;
   if (previous == NULL)
     {
        tag_hash_table[hash] = block;
     }
   else
     {
        previous->tag_next = block;
     }
}
#endif /* TAG_STATISTICS_TRACKING */

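/*
 * Initialize the nonpaged pool: record the pool base, set up the pool
 * lock and the kernel map, and start with empty free and used lists
 */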
VOID
ExInitNonPagedPool(ULONG BaseAddress)
{
   kernel_pool_base = BaseAddress;
   KeInitializeSpinLock(&MmNpoolLock);
   MmInitKernelMap((PVOID)BaseAddress);
#ifdef TAG_STATISTICS_TRACKING
   memset(tag_hash_table, 0, sizeof(tag_hash_table));
#endif /* TAG_STATISTICS_TRACKING */
   InitializeListHead(&FreeBlockListHead);
   InitializeListHead(&UsedBlockListHead);
}

#ifdef TAG_STATISTICS_TRACKING
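/*
 * Print the block count, total size and average size for one tag,
 * decoding the tag as four characters when it is printable
 */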
VOID STATIC
MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
{
   CHAR c1, c2, c3, c4;

   c1 = (CurrentTag >> 24) & 0xFF;
   c2 = (CurrentTag >> 16) & 0xFF;
   c3 = (CurrentTag >> 8) & 0xFF;
   c4 = CurrentTag & 0xFF;

   if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
     {
        DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
                 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
                 CurrentSize, CurrentSize / CurrentNrBlocks);
     }
   else
     {
        DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
                 CurrentTag, CurrentNrBlocks, CurrentSize,
                 CurrentSize / CurrentNrBlocks);
     }
}
#endif /* TAG_STATISTICS_TRACKING */

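/*
 * Dump per-tag allocation statistics for the nonpaged pool; if NewOnly
 * is TRUE, only count blocks allocated since the last dump
 */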
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#ifdef TAG_STATISTICS_TRACKING
   ULONG i;
   BLOCK_HDR* current;
   ULONG CurrentTag;
   ULONG CurrentNrBlocks;
   ULONG CurrentSize;
   ULONG TotalBlocks;
   ULONG TotalSize;

   DbgPrint("******* Dumping nonpaged pool stats ******\n");
   TotalBlocks = 0;
   TotalSize = 0;
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
     {
        CurrentTag = 0;
        CurrentNrBlocks = 0;
        CurrentSize = 0;
        current = tag_hash_table[i];
        while (current != NULL)
          {
             if (current->Tag != CurrentTag)
               {
                  if (CurrentTag != 0 && CurrentNrBlocks != 0)
                    {
                       MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
                    }
                  CurrentTag = current->Tag;
                  CurrentNrBlocks = 0;
                  CurrentSize = 0;
               }

             if (!NewOnly || !current->Dumped)
               {
                  CurrentNrBlocks++;
                  TotalBlocks++;
                  CurrentSize = CurrentSize + current->Size;
                  TotalSize = TotalSize + current->Size;
                  current->Dumped = TRUE;
               }
             current = current->tag_next;
          }
        if (CurrentTag != 0 && CurrentNrBlocks != 0)
          {
             MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
          }
     }
   if (TotalBlocks != 0)
     {
        DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
                 TotalBlocks, TotalSize, TotalSize / TotalBlocks);
     }
   else
     {
        DbgPrint("TotalBlocks %d TotalSize %d\n",
                 TotalBlocks, TotalSize);
     }
   DbgPrint("***************** Dump Complete ***************\n");
#endif /* TAG_STATISTICS_TRACKING */
}

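/*
 * Dump the size, tag and allocator address of every block on the used
 * list; if NewOnly is TRUE, only dump blocks allocated since the last dump
 */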
VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   KIRQL oldIrql;

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   DbgPrint("******* Dumping nonpaged pool contents ******\n");
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        if (!NewOnly || !current->Dumped)
          {
             CHAR c1, c2, c3, c4;

             c1 = (current->Tag >> 24) & 0xFF;
             c2 = (current->Tag >> 16) & 0xFF;
             c3 = (current->Tag >> 8) & 0xFF;
             c4 = current->Tag & 0xFF;

             if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
               {
                  DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                           current->Size, current->Tag, c4, c3, c2, c1,
                           current->Caller);
               }
             else
               {
                  DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                           current->Size, current->Tag, current->Caller);
               }
             current->Dumped = TRUE;
          }
        current_entry = current_entry->Flink;
     }
   DbgPrint("***************** Dump Complete ***************\n");
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}

#ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        unsigned int base_addr;

        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        base_addr = (int)current;

        if (current->Magic != BLOCK_HDR_FREE_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }

        if (base_addr < (kernel_pool_base) ||
            (base_addr+current->Size) > (kernel_pool_base)+NONPAGED_POOL_SIZE)
          {
             DbgPrint("Block %x found outside pool area\n",current);
             DbgPrint("Size %d\n",current->Size);
             DbgPrint("Limits are %x %x\n",kernel_pool_base,
                      kernel_pool_base+NONPAGED_POOL_SIZE);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        blocks_seen++;
        if (blocks_seen > EiNrFreeBlocks)
          {
             DbgPrint("Too many blocks on free list\n");
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        if (current->ListEntry.Flink != &FreeBlockListHead &&
            current->ListEntry.Flink->Blink != &current->ListEntry)
          {
             DbgPrint("%s:%d:Break in list (current %x next %x "
                      "current->next->previous %x)\n",
                      __FILE__,__LINE__,current, current->ListEntry.Flink,
                      current->ListEntry.Flink->Blink);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }

        current_entry = current_entry->Flink;
     }
}

static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
        unsigned int base_addr;

        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        base_addr = (int)current;

        if (current->Magic != BLOCK_HDR_USED_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        if (base_addr < (kernel_pool_base) ||
            (base_addr+current->Size) >
            (kernel_pool_base)+NONPAGED_POOL_SIZE)
          {
             DbgPrint("Block %x found outside pool area\n",current);
             for(;;);
          }
        blocks_seen++;
        if (blocks_seen > EiNrUsedBlocks)
          {
             DbgPrint("Too many blocks on used list\n");
             for(;;);
          }
        if (current->ListEntry.Flink != &UsedBlockListHead &&
            current->ListEntry.Flink->Blink != &current->ListEntry)
          {
             DbgPrint("Break in list (current %x next %x)\n",
                      current, current->ListEntry.Flink);
             for(;;);
          }

        current_entry = current_entry->Flink;
     }
}

static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check that a block does not overlap any other block
 * ARGUMENTS:
 *          blk = block to check
 * NOTE: Bug checks if overlapping blocks are found
 */
{
   unsigned int base = (int)blk;
   unsigned int last = ((int)blk) + sizeof(BLOCK_HDR) + blk->Size;
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

        if (current->Magic != BLOCK_HDR_FREE_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }

        if ( (int)current > base && (int)current < last )
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }
        if ( (int)current < base &&
             ((int)current + current->Size + sizeof(BLOCK_HDR))
             > base )
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }

        current_entry = current_entry->Flink;
     }

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

        if ( (int)current > base && (int)current < last )
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }
        if ( (int)current < base &&
             ((int)current + current->Size + sizeof(BLOCK_HDR))
             > base )
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }

        current_entry = current_entry->Flink;
     }
}

static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;

   validate_free_list();
   validate_used_list();

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        check_duplicates(current);
        current_entry = current_entry->Flink;
     }
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        check_duplicates(current);
        current_entry = current_entry->Flink;
     }
}
#endif

#if 0
STATIC VOID
free_pages(BLOCK_HDR* blk)
{
   ULONG start;
   ULONG end;
   ULONG i;

   start = (ULONG)blk;
   end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

   /*
    * If the block doesn't contain a whole page then there is nothing to do
    */
   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
     {
        return;
     }
}
#endif

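/*
 * Merge a newly freed block with the physically adjacent blocks before
 * and after it on the (address-ordered) free list
 */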
STATIC VOID
merge_free_block(BLOCK_HDR* blk)
{
   PLIST_ENTRY next_entry;
   BLOCK_HDR* next;
   PLIST_ENTRY previous_entry;
   BLOCK_HDR* previous;

   next_entry = blk->ListEntry.Flink;
   if (next_entry != &FreeBlockListHead)
     {
        next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
        if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
            (unsigned int)next)
          {
             RemoveEntryList(&next->ListEntry);
             blk->Size = blk->Size + sizeof(BLOCK_HDR) + next->Size;
             EiNrFreeBlocks--;
          }
     }

   previous_entry = blk->ListEntry.Blink;
   if (previous_entry != &FreeBlockListHead)
     {
        previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
        if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
            (unsigned int)blk)
          {
             RemoveEntryList(&blk->ListEntry);
             previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
             EiNrFreeBlocks--;
          }
     }
}

STATIC VOID
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the free list, keeping the list ordered
 * by address (internal)
 */
{
   PLIST_ENTRY current_entry;
   BLOCK_HDR* current;

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

        if ((unsigned int)current > (unsigned int)blk)
          {
             blk->ListEntry.Flink = current_entry;
             blk->ListEntry.Blink = current_entry->Blink;
             current_entry->Blink->Flink = &blk->ListEntry;
             current_entry->Blink = &blk->ListEntry;
             EiNrFreeBlocks++;
             return;
          }

        current_entry = current_entry->Flink;
     }
   InsertTailList(&FreeBlockListHead, &blk->ListEntry);
   EiNrFreeBlocks++;
}

static void add_to_used_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the used list (internal)
 */
{
   InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
   EiNrUsedBlocks++;
}

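/*
 * FUNCTION: remove the block from the free list (internal)
 */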
static void remove_from_free_list(BLOCK_HDR* current)
{
   RemoveEntryList(&current->ListEntry);
   EiNrFreeBlocks--;
}

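/*
 * FUNCTION: remove the block from the used list (internal)
 */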
static void remove_from_used_list(BLOCK_HDR* current)
{
   RemoveEntryList(&current->ListEntry);
   EiNrUsedBlocks--;
}

inline static void* block_to_address(BLOCK_HDR* blk)
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
   return ( (void *) ((int)blk + sizeof(BLOCK_HDR)) );
}

inline static BLOCK_HDR* address_to_block(void* addr)
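/*
 * FUNCTION: Translate a block address to the corresponding block header
 * address (internal)
 */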
{
   return (BLOCK_HDR *)
          ( ((int)addr) - sizeof(BLOCK_HDR) );
}

static BLOCK_HDR* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Grow the executive heap to accommodate a block of at least
 * 'size' bytes
 */
{
   unsigned int total_size = size + sizeof(BLOCK_HDR);
   unsigned int nr_pages = PAGE_ROUND_UP(total_size) / PAGESIZE;
   unsigned int start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);
   BLOCK_HDR* used_blk=NULL;
   BLOCK_HDR* free_blk=NULL;
   int i;
   NTSTATUS Status;

   DPRINT("growing heap for block size %d, ",size);
   DPRINT("start %x\n",start);

   for (i=0;i<nr_pages;i++)
     {
        Status = MmCreateVirtualMapping(NULL,
                                        (PVOID)(start + (i*PAGESIZE)),
                                        PAGE_READWRITE,
                                        (ULONG)MmAllocPage(0));
        if (!NT_SUCCESS(Status))
          {
             DbgPrint("Unable to create virtual mapping\n");
             KeBugCheck(0);
          }
     }

   /*
    * If the residue at the end of the last page is large enough for
    * another block header plus some data, split the region into a used
    * block and a free block; otherwise hand out the whole region.
    */
   if ((PAGESIZE-(total_size%PAGESIZE))>(2*sizeof(BLOCK_HDR)))
     {
        used_blk = (struct _BLOCK_HDR *)start;
        DPRINT("Creating block at %x\n",start);
        used_blk->Magic = BLOCK_HDR_USED_MAGIC;
        used_blk->Size = size;
        add_to_used_list(used_blk);

        free_blk = (BLOCK_HDR *)(start + sizeof(BLOCK_HDR) + size);
        DPRINT("Creating block at %x\n",free_blk);
        free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
        free_blk->Size = (nr_pages * PAGESIZE) -((sizeof(BLOCK_HDR)*2) + size);
        add_to_free_list(free_blk);

        EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;
        EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;
     }
   else
     {
        used_blk = (struct _BLOCK_HDR *)start;
        used_blk->Magic = BLOCK_HDR_USED_MAGIC;
        used_blk->Size = (nr_pages * PAGESIZE) - sizeof(BLOCK_HDR);
        add_to_used_list(used_blk);

        EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;
     }

   used_blk->Tag = Tag;
   used_blk->Caller = Caller;
   used_blk->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(used_blk);
#endif /* TAG_STATISTICS_TRACKING */

   VALIDATE_POOL;
   return(used_blk);
}

static void* take_block(BLOCK_HDR* current, unsigned int size,
                        ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocate a used block of at least 'size' bytes from the
 * specified free block
 * RETURNS: The address of the created memory block
 */
{
   /*
    * If the block is much bigger than required then split it and
    * return a pointer to the allocated section. If the difference
    * between the sizes is marginal it makes no sense to have the
    * extra overhead
    */
   if (current->Size > (1 + size + sizeof(BLOCK_HDR)))
     {
        BLOCK_HDR* free_blk;

        EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;

        /*
         * Replace the bigger block with a smaller block in the
         * same position in the list
         */
        free_blk = (BLOCK_HDR *)(((int)current)
                                 + sizeof(BLOCK_HDR) + size);
        free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
        InsertHeadList(&current->ListEntry, &free_blk->ListEntry);
        free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);

        current->Size=size;
        RemoveEntryList(&current->ListEntry);
        InsertHeadList(&UsedBlockListHead, &current->ListEntry);
        EiNrUsedBlocks++;
        current->Magic = BLOCK_HDR_USED_MAGIC;
        current->Tag = Tag;
        current->Caller = Caller;
        current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
        MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

        EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;
        EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;

        VALIDATE_POOL;
        return(block_to_address(current));
     }

   /*
    * Otherwise allocate the whole block
    */
   remove_from_free_list(current);
   add_to_used_list(current);

   EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;
   EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;

   current->Magic = BLOCK_HDR_USED_MAGIC;
   current->Tag = Tag;
   current->Caller = Caller;
   current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

   VALIDATE_POOL;
   return(block_to_address(current));
}

VOID STDCALL ExFreePool (PVOID block)
/*
 * FUNCTION: Releases previously allocated memory
 * ARGUMENTS:
 *        block = block to free
 */
{
   BLOCK_HDR* blk=address_to_block(block);
   KIRQL oldIrql;

   if (block == NULL)
     {
        return;
     }

   DPRINT("freeing block %x\n",blk);

   POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->Size,
              ((PULONG)&block)[-1]);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   VALIDATE_POOL;

   if (blk->Magic != BLOCK_HDR_USED_MAGIC)
     {
        if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
          {
             DbgPrint("ExFreePool of already freed address %x\n", block);
          }
        else
          {
             DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
                      block, blk->Magic);
          }
        KeBugCheck(0);
        return;
     }

   /* Fill the freed block with a distinctive pattern to catch stale uses */
   memset(block, 0xcc, blk->Size);

#ifdef TAG_STATISTICS_TRACKING
   MiRemoveFromTagHashTable(blk);
#endif /* TAG_STATISTICS_TRACKING */
   remove_from_used_list(blk);
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   add_to_free_list(blk);
   merge_free_block(blk);

   EiUsedNonPagedPool = EiUsedNonPagedPool - blk->Size;
   EiFreeNonPagedPool = EiFreeNonPagedPool + blk->Size;

   VALIDATE_POOL;

   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}

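/*
 * FUNCTION: Allocate a block of at least 'Size' bytes from the nonpaged
 * pool, using the smallest free block that fits (best fit) and growing
 * the pool when no free block is large enough; the returned memory is
 * zero-filled
 */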
PVOID STDCALL
ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
{
   BLOCK_HDR* current = NULL;
   PLIST_ENTRY current_entry;
   PVOID block;
   BLOCK_HDR* best = NULL;
   KIRQL oldIrql;

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size,Caller);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   VALIDATE_POOL;

   /*
    * accommodate this useful idiom
    */
   if (Size == 0)
     {
        POOL_TRACE("= NULL\n");
        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
        return(NULL);
     }

   /*
    * Look for an already created block of sufficient size
    */
   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        DPRINT("current %x size %x tag_next %x\n",current,current->Size,
               current->tag_next);
        if (current->Size >= Size &&
            (best == NULL || current->Size < best->Size))
          {
             best = current;
          }
        current_entry = current_entry->Flink;
     }
   if (best != NULL)
     {
        block=take_block(best, Size, Tag, Caller);
        VALIDATE_POOL;
        memset(block,0,Size);
        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
        return(block);
     }

   /*
    * Otherwise create a new block
    */
   block=block_to_address(grow_kernel_pool(Size, Tag, Caller));
   VALIDATE_POOL;
   memset(block, 0, Size);
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   return(block);
}

/* EOF */