include/csrss/csrss.h: Include a define for the size of the common
[reactos.git] / reactos / ntoskrnl / mm / npool.c
1 /* $Id: npool.c,v 1.49 2001/11/25 15:21:11 dwelch Exp $
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
8 * UPDATE HISTORY:
9 * 27/05/98: Created
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
13 * in ExFreePool
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
15 */
16
17 /* INCLUDES ****************************************************************/
18
19 #include <ddk/ntddk.h>
20 #include <internal/mm.h>
21 #include <internal/bitops.h>
22 #include <internal/ntoskrnl.h>
23 #include <internal/pool.h>
24
25 #define NDEBUG
26 #include <internal/debug.h>
27
28 /* Enable strict checking of the nonpaged pool on every allocation */
29 //#define ENABLE_VALIDATE_POOL
30
31 /* Enable tracking of statistics about the tagged blocks in the pool */
32 #define TAG_STATISTICS_TRACKING
33
34 /*
35 * Put each block in its own range of pages and position the block at the
36 * end of the range so any accesses beyond the end of block are to invalid
37 * memory locations.
38 */
39 //#define WHOLE_PAGE_ALLOCATIONS
40
41 #ifdef ENABLE_VALIDATE_POOL
42 #define VALIDATE_POOL validate_kernel_pool()
43 #else
44 #define VALIDATE_POOL
45 #endif
46
47 #if 0
48 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
49 #else
50 #define POOL_TRACE(args...)
51 #endif
52
53 /* TYPES *******************************************************************/
54
55 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
56 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
57
/*
 * fields present at the start of a block (this is for internal use only)
 *
 * Header prepended to every pool block, free or in use.  The memory
 * returned to callers starts immediately after this structure.
 */
typedef struct _BLOCK_HDR
{
   ULONG Magic;                 /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
   ULONG Size;                  /* Size of the data area, excluding this header */
   LIST_ENTRY ListEntry;        /* Link in FreeBlockListHead or UsedBlockListHead */
   ULONG Tag;                   /* Allocator-supplied tag (0 = untagged) */
   PVOID Caller;                /* Address of the code that allocated the block */
   struct _BLOCK_HDR* tag_next; /* Next block in the same tag_hash_table chain */
   BOOLEAN Dumped;              /* TRUE once reported by the debug dump routines */
} BLOCK_HDR;
71
72 PVOID STDCALL
73 ExAllocateWholePageBlock(ULONG Size);
74 VOID STDCALL
75 ExFreeWholePageBlock(PVOID Addr);
76
77 /* GLOBALS *****************************************************************/
78
79 /*
80 * Memory managment initalized symbol for the base of the pool
81 */
82 static unsigned int kernel_pool_base = 0;
83
84 /*
85 * Head of the list of free blocks
86 */
87 static LIST_ENTRY FreeBlockListHead;
88
89 /*
90 * Head of the list of in use block
91 */
92 static LIST_ENTRY UsedBlockListHead;
93
94 #ifndef WHOLE_PAGE_ALLOCATIONS
95 /*
96 * Count of free blocks
97 */
98 static ULONG EiNrFreeBlocks = 0;
99
100 /*
101 * Count of used blocks
102 */
103 static ULONG EiNrUsedBlocks = 0;
104 #endif
105
106 /*
107 * Lock that protects the non-paged pool data structures
108 */
109 static KSPIN_LOCK MmNpoolLock;
110
111 /*
112 * Total memory used for free nonpaged pool blocks
113 */
114 ULONG EiFreeNonPagedPool = 0;
115
116 /*
117 * Total memory used for nonpaged pool blocks
118 */
119 ULONG EiUsedNonPagedPool = 0;
120
121 /*
122 * Allocate a range of memory in the nonpaged pool
123 */
124 PVOID
125 MiAllocNonPagedPoolRegion(unsigned int nr_pages);
126
127 VOID
128 MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);
129
130 #ifdef TAG_STATISTICS_TRACKING
131 #define TAG_HASH_TABLE_SIZE (1024)
132 static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
133 #endif /* TAG_STATISTICS_TRACKING */
134
135 /* FUNCTIONS ***************************************************************/
136
137 #ifdef TAG_STATISTICS_TRACKING
138 VOID
139 MiRemoveFromTagHashTable(BLOCK_HDR* block)
140 /*
141 * Remove a block from the tag hash table
142 */
143 {
144 BLOCK_HDR* previous;
145 BLOCK_HDR* current;
146 ULONG hash;
147
148 if (block->Tag == 0)
149 {
150 return;
151 }
152
153 hash = block->Tag % TAG_HASH_TABLE_SIZE;
154
155 previous = NULL;
156 current = tag_hash_table[hash];
157 while (current != NULL)
158 {
159 if (current == block)
160 {
161 if (previous == NULL)
162 {
163 tag_hash_table[hash] = block->tag_next;
164 }
165 else
166 {
167 previous->tag_next = block->tag_next;
168 }
169 return;
170 }
171 previous = current;
172 current = current->tag_next;
173 }
174 DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
175 block->Tag, block->Caller);
176 KeBugCheck(0);
177 }
178
179 VOID
180 MiAddToTagHashTable(BLOCK_HDR* block)
181 /*
182 * Add a block to the tag hash table
183 */
184 {
185 ULONG hash;
186 BLOCK_HDR* current;
187 BLOCK_HDR* previous;
188
189 if (block->Tag == 0)
190 {
191 return;
192 }
193
194 hash = block->Tag % TAG_HASH_TABLE_SIZE;
195
196 previous = NULL;
197 current = tag_hash_table[hash];
198 while (current != NULL)
199 {
200 if (current->Tag == block->Tag)
201 {
202 block->tag_next = current->tag_next;
203 current->tag_next = block;
204 return;
205 }
206 previous = current;
207 if ((PVOID)current->tag_next >= (PVOID)0xc1123160)
208 {
209 DbgPrint("previous %x\n", previous);
210 }
211 current = current->tag_next;
212 }
213 block->tag_next = NULL;
214 if (previous == NULL)
215 {
216 tag_hash_table[hash] = block;
217 }
218 else
219 {
220 previous->tag_next = block;
221 }
222 }
223 #endif /* TAG_STATISTICS_TRACKING */
224
225 VOID
226 ExInitNonPagedPool(ULONG BaseAddress)
227 {
228 kernel_pool_base = BaseAddress;
229 KeInitializeSpinLock(&MmNpoolLock);
230 MmInitKernelMap((PVOID)BaseAddress);
231 memset(tag_hash_table, 0, sizeof(tag_hash_table));
232 InitializeListHead(&FreeBlockListHead);
233 InitializeListHead(&UsedBlockListHead);
234 }
235
236 #ifdef TAG_STATISTICS_TRACKING
237 VOID STATIC
238 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
239 {
240 CHAR c1, c2, c3, c4;
241
242 c1 = (CurrentTag >> 24) & 0xFF;
243 c2 = (CurrentTag >> 16) & 0xFF;
244 c3 = (CurrentTag >> 8) & 0xFF;
245 c4 = CurrentTag & 0xFF;
246
247 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
248 {
249 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
250 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
251 CurrentSize, CurrentSize / CurrentNrBlocks);
252 }
253 else
254 {
255 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
256 CurrentTag, CurrentNrBlocks, CurrentSize,
257 CurrentSize / CurrentNrBlocks);
258 }
259 }
260 #endif /* TAG_STATISTICS_TRACKING */
261
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
/*
 * Dump per-tag allocation statistics for the nonpaged pool.
 * ARGUMENTS:
 *       NewOnly = if TRUE, count only blocks not reported by a previous
 *                 dump (each counted block is then marked Dumped)
 * NOTE(review): no pool lock is taken here, unlike
 * MiDebugDumpNonPagedPool -- confirm callers hold MmNpoolLock or accept
 * the race for debug output.
 */
{
#ifdef TAG_STATISTICS_TRACKING
   ULONG i;
   BLOCK_HDR* current;
   ULONG CurrentTag;
   ULONG CurrentNrBlocks;
   ULONG CurrentSize;
   ULONG TotalBlocks;
   ULONG TotalSize;

   DbgPrint("******* Dumping non paging pool stats ******\n");
   TotalBlocks = 0;
   TotalSize = 0;
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
     {
	CurrentTag = 0;
	CurrentNrBlocks = 0;
	CurrentSize = 0;
	current = tag_hash_table[i];
	/* MiAddToTagHashTable keeps same-tag blocks adjacent, so a run of
	 * equal tags can be accumulated and flushed when the tag changes */
	while (current != NULL)
	  {
	     if (current->Tag != CurrentTag)
	       {
		  /* Tag changed: emit the totals of the previous run */
		  if (CurrentTag != 0 && CurrentNrBlocks != 0)
		    {
		       MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
		    }
		  CurrentTag = current->Tag;
		  CurrentNrBlocks = 0;
		  CurrentSize = 0;
	       }

	     if (!NewOnly || !current->Dumped)
	       {
		  CurrentNrBlocks++;
		  TotalBlocks++;
		  CurrentSize = CurrentSize + current->Size;
		  TotalSize = TotalSize + current->Size;
		  current->Dumped = TRUE;
	       }
	     current = current->tag_next;
	  }
	/* Flush the final run of the chain */
	if (CurrentTag != 0 && CurrentNrBlocks != 0)
	  {
	     MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
	  }
     }
   /* Guard the average against a division by zero when nothing counted */
   if (TotalBlocks != 0)
     {
	DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
		 TotalBlocks, TotalSize, TotalSize / TotalBlocks);
     }
   else
     {
	DbgPrint("TotalBlocks %d TotalSize %d\n",
		 TotalBlocks, TotalSize);
     }
   DbgPrint("***************** Dump Complete ***************\n");
#endif /* TAG_STATISTICS_TRACKING */
}
324
VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
/*
 * Dump every block currently on the used list, one line per block.
 * ARGUMENTS:
 *       NewOnly = if TRUE, show only blocks not reported by a previous
 *                 dump (each shown block is then marked Dumped)
 * The pool spinlock is held for the duration of the dump.
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   KIRQL oldIrql;

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   DbgPrint("******* Dumping non paging pool contents ******\n");
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
	current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
	if (!NewOnly || !current->Dumped)
	  {
	     CHAR c1, c2, c3, c4;

	     /* Decode the tag characters (stored low byte first) */
	     c1 = (current->Tag >> 24) & 0xFF;
	     c2 = (current->Tag >> 16) & 0xFF;
	     c3 = (current->Tag >> 8) & 0xFF;
	     c4 = current->Tag & 0xFF;

	     /* Print the tag as text only if all four bytes are printable */
	     if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
	       {
		  DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
			   current->Size, current->Tag, c4, c3, c2, c1,
			   current->Caller);
	       }
	     else
	       {
		  DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
			   current->Size, current->Tag, current->Caller);
	       }
	     current->Dumped = TRUE;
	  }
	current_entry = current_entry->Flink;
     }
   DbgPrint("***************** Dump Complete ***************\n");
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}
366
367 #ifndef WHOLE_PAGE_ALLOCATIONS
368
369 #ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 * Bugchecks on: bad magic, a block outside the pool address range, more
 * blocks on the list than EiNrFreeBlocks (a cycle or stray link), or a
 * forward/backward link mismatch between neighbours.
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
	unsigned int base_addr;

	current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
	base_addr = (int)current;

	/* Every free block must carry the free magic */
	if (current->Magic != BLOCK_HDR_FREE_MAGIC)
	  {
	     DbgPrint("Bad block magic (probable pool corruption) at %x\n",
		      current);
	     KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
	  }

	/* Header and data area must lie entirely inside the pool region */
	if (base_addr < (kernel_pool_base) ||
	    (base_addr+current->Size) > (kernel_pool_base)+NONPAGED_POOL_SIZE)
	  {
	     DbgPrint("Block %x found outside pool area\n",current);
	     DbgPrint("Size %d\n",current->Size);
	     DbgPrint("Limits are %x %x\n",kernel_pool_base,
		      kernel_pool_base+NONPAGED_POOL_SIZE);
	     KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
	  }
	blocks_seen++;
	/* A count above EiNrFreeBlocks means the list loops or gained
	 * entries that were never accounted for */
	if (blocks_seen > EiNrFreeBlocks)
	  {
	     DbgPrint("Too many blocks on free list\n");
	     KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
	  }
	/* The next node's back pointer must point at this node */
	if (current->ListEntry.Flink != &FreeBlockListHead &&
	    current->ListEntry.Flink->Blink != &current->ListEntry)
	  {
	     DbgPrint("%s:%d:Break in list (current %x next %x "
		      "current->next->previous %x)\n",
		      __FILE__,__LINE__,current, current->ListEntry.Flink,
		      current->ListEntry.Flink->Blink);
	     KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
	  }

	current_entry = current_entry->Flink;
     }
}
422
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * Same checks as validate_free_list, but for BLOCK_HDR_USED_MAGIC and
 * EiNrUsedBlocks.  Note that the later checks hang in an infinite loop
 * instead of bugchecking (so a debugger can inspect the state).
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
	unsigned int base_addr;

	current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
	base_addr = (int)current;

	/* Every used block must carry the used magic */
	if (current->Magic != BLOCK_HDR_USED_MAGIC)
	  {
	     DbgPrint("Bad block magic (probable pool corruption) at %x\n",
		      current);
	     KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
	  }
	/* Header and data area must lie entirely inside the pool region */
	if (base_addr < (kernel_pool_base) ||
	    (base_addr+current->Size) >
	    (kernel_pool_base)+NONPAGED_POOL_SIZE)
	  {
	     DbgPrint("Block %x found outside pool area\n",current);
	     for(;;);
	  }
	blocks_seen++;
	/* More entries than EiNrUsedBlocks implies a cycle or corruption */
	if (blocks_seen > EiNrUsedBlocks)
	  {
	     DbgPrint("Too many blocks on used list\n");
	     for(;;);
	  }
	/* The next node's back pointer must point at this node */
	if (current->ListEntry.Flink != &UsedBlockListHead &&
	    current->ListEntry.Flink->Blink != &current->ListEntry)
	  {
	     DbgPrint("Break in list (current %x next %x)\n",
		      current, current->ListEntry.Flink);
	     for(;;);
	  }

	current_entry = current_entry->Flink;
     }
}
470
471 static void check_duplicates(BLOCK_HDR* blk)
472 /*
473 * FUNCTION: Check a block has no duplicates
474 * ARGUMENTS:
475 * blk = block to check
476 * NOTE: Bug checks if duplicates are found
477 */
478 {
479 unsigned int base = (int)blk;
480 unsigned int last = ((int)blk) + +sizeof(BLOCK_HDR) + blk->Size;
481 BLOCK_HDR* current;
482 PLIST_ENTRY current_entry;
483
484 current_entry = FreeBlockListHead.Flink;
485 while (current_entry != &FreeBlockListHead)
486 {
487 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
488
489 if (current->Magic != BLOCK_HDR_FREE_MAGIC)
490 {
491 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
492 current);
493 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
494 }
495
496 if ( (int)current > base && (int)current < last )
497 {
498 DbgPrint("intersecting blocks on list\n");
499 for(;;);
500 }
501 if ( (int)current < base &&
502 ((int)current + current->Size + sizeof(BLOCK_HDR))
503 > base )
504 {
505 DbgPrint("intersecting blocks on list\n");
506 for(;;);
507 }
508
509 current_entry = current_entry->Flink;
510 }
511
512 current_entry = UsedBlockListHead.Flink;
513 while (current_entry != &UsedBlockListHead)
514 {
515 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
516
517 if ( (int)current > base && (int)current < last )
518 {
519 DbgPrint("intersecting blocks on list\n");
520 for(;;);
521 }
522 if ( (int)current < base &&
523 ((int)current + current->Size + sizeof(BLOCK_HDR))
524 > base )
525 {
526 DbgPrint("intersecting blocks on list\n");
527 for(;;);
528 }
529
530 current_entry = current_entry->Flink;
531 }
532
533 }
534
535 static void validate_kernel_pool(void)
536 /*
537 * FUNCTION: Checks the integrity of the kernel memory heap
538 */
539 {
540 BLOCK_HDR* current;
541 PLIST_ENTRY current_entry;
542
543 validate_free_list();
544 validate_used_list();
545
546 current_entry = FreeBlockListHead.Flink;
547 while (current_entry != &FreeBlockListHead)
548 {
549 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
550 check_duplicates(current);
551 current_entry = current_entry->Flink;
552 }
553 current_entry = UsedBlockListHead.Flink;
554 while (current_entry != &UsedBlockListHead)
555 {
556 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
557 check_duplicates(current);
558 current_entry = current_entry->Flink;
559 }
560 }
561 #endif
562
563 #if 0
STATIC VOID
free_pages(BLOCK_HDR* blk)
/*
 * Intended to release whole pages contained inside a free block back to
 * the system.  Only the early-out for blocks smaller than a page is
 * implemented; the function is compiled out by the surrounding #if 0.
 */
{
   ULONG start;
   ULONG end;
   ULONG i;   /* unused: the page-freeing loop was never written */

   start = (ULONG)blk;
   end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

   /*
    * If the block doesn't contain a whole page then there is nothing to do
    */
   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
     {
	return;
     }
}
582 #endif
583
STATIC VOID
merge_free_block(BLOCK_HDR* blk)
/*
 * FUNCTION: Coalesce 'blk' with physically adjacent free blocks.
 * Works because add_to_free_list keeps the free list sorted by address,
 * so only the immediate list neighbours can be physically adjacent.
 */
{
   PLIST_ENTRY next_entry;
   BLOCK_HDR* next;
   PLIST_ENTRY previous_entry;
   BLOCK_HDR* previous;

   /* Forward merge: absorb the next block if it starts exactly where
    * our data area ends */
   next_entry = blk->ListEntry.Flink;
   if (next_entry != &FreeBlockListHead)
     {
	next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
	if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
	    (unsigned int)next)
	  {
	     RemoveEntryList(&next->ListEntry);
	     /* The absorbed header becomes part of our data area */
	     blk->Size = blk->Size + sizeof(BLOCK_HDR) + next->Size;
	     EiNrFreeBlocks--;
	  }
     }

   /* Backward merge: let the previous block absorb us if our header
    * starts exactly where its data area ends */
   previous_entry = blk->ListEntry.Blink;
   if (previous_entry != &FreeBlockListHead)
     {
	previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
	if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
	    (unsigned int)blk)
	  {
	     RemoveEntryList(&blk->ListEntry);
	     previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
	     EiNrFreeBlocks--;
	     /* NOTE(review): after a backward merge 'blk' is no longer a
	      * valid block header; callers must not rely on blk->Size
	      * afterwards */
	  }
     }
}
618
619 STATIC VOID
620 add_to_free_list(BLOCK_HDR* blk)
621 /*
622 * FUNCTION: add the block to the free list (internal)
623 */
624 {
625 PLIST_ENTRY current_entry;
626 BLOCK_HDR* current;
627
628 current_entry = FreeBlockListHead.Flink;
629 while (current_entry != &FreeBlockListHead)
630 {
631 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
632
633 if ((unsigned int)current > (unsigned int)blk)
634 {
635 blk->ListEntry.Flink = current_entry;
636 blk->ListEntry.Blink = current_entry->Blink;
637 current_entry->Blink->Flink = &blk->ListEntry;
638 current_entry->Blink = &blk->ListEntry;
639 EiNrFreeBlocks++;
640 return;
641 }
642
643 current_entry = current_entry->Flink;
644 }
645 InsertTailList(&FreeBlockListHead, &blk->ListEntry);
646 EiNrFreeBlocks++;
647 }
648
649 static void add_to_used_list(BLOCK_HDR* blk)
650 /*
651 * FUNCTION: add the block to the used list (internal)
652 */
653 {
654 InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
655 EiNrUsedBlocks++;
656 }
657
658
659 static void remove_from_free_list(BLOCK_HDR* current)
660 {
661 RemoveEntryList(&current->ListEntry);
662 EiNrFreeBlocks--;
663 }
664
665
666 static void remove_from_used_list(BLOCK_HDR* current)
667 {
668 RemoveEntryList(&current->ListEntry);
669 EiNrUsedBlocks--;
670 }
671
672
673 inline static void* block_to_address(BLOCK_HDR* blk)
674 /*
675 * FUNCTION: Translate a block header address to the corresponding block
676 * address (internal)
677 */
678 {
679 return ( (void *) ((int)blk + sizeof(BLOCK_HDR)) );
680 }
681
682 inline static BLOCK_HDR* address_to_block(void* addr)
683 {
684 return (BLOCK_HDR *)
685 ( ((int)addr) - sizeof(BLOCK_HDR) );
686 }
687
688 static BLOCK_HDR* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
689 /*
690 * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
691 * bytes
692 */
693 {
694 unsigned int total_size = size + sizeof(BLOCK_HDR);
695 unsigned int nr_pages = PAGE_ROUND_UP(total_size) / PAGESIZE;
696 unsigned int start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);
697 BLOCK_HDR* used_blk=NULL;
698 BLOCK_HDR* free_blk=NULL;
699 int i;
700 NTSTATUS Status;
701
702 DPRINT("growing heap for block size %d, ",size);
703 DPRINT("start %x\n",start);
704
705 for (i=0;i<nr_pages;i++)
706 {
707 Status = MmCreateVirtualMapping(NULL,
708 (PVOID)(start + (i*PAGESIZE)),
709 PAGE_READWRITE,
710 (ULONG)MmAllocPage(0));
711 if (!NT_SUCCESS(Status))
712 {
713 DbgPrint("Unable to create virtual mapping\n");
714 KeBugCheck(0);
715 }
716 }
717
718
719 if ((PAGESIZE-(total_size%PAGESIZE))>(2*sizeof(BLOCK_HDR)))
720 {
721 used_blk = (struct _BLOCK_HDR *)start;
722 DPRINT("Creating block at %x\n",start);
723 used_blk->Magic = BLOCK_HDR_USED_MAGIC;
724 used_blk->Size = size;
725 add_to_used_list(used_blk);
726
727 free_blk = (BLOCK_HDR *)(start + sizeof(BLOCK_HDR) + size);
728 DPRINT("Creating block at %x\n",free_blk);
729 free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
730 free_blk->Size = (nr_pages * PAGESIZE) -((sizeof(BLOCK_HDR)*2) + size);
731 add_to_free_list(free_blk);
732
733 EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;
734 EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;
735 }
736 else
737 {
738 used_blk = (struct _BLOCK_HDR *)start;
739 used_blk->Magic = BLOCK_HDR_USED_MAGIC;
740 used_blk->Size = (nr_pages * PAGESIZE) - sizeof(BLOCK_HDR);
741 add_to_used_list(used_blk);
742
743 EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;
744 }
745
746 used_blk->Tag = Tag;
747 used_blk->Caller = Caller;
748 used_blk->Dumped = FALSE;
749 #ifdef TAG_STATISTICS_TRACKING
750 MiAddToTagHashTable(used_blk);
751 #endif /* TAG_STATISTICS_TRACKING */
752
753 VALIDATE_POOL;
754 return(used_blk);
755 }
756
static void* take_block(BLOCK_HDR* current, unsigned int size,
			ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocate a used block of least 'size' from the specified
 * free block
 * RETURNS: The address of the created memory block
 * 'current' must be on the free list; on return it is on the used list
 * with the given tag/caller recorded and (when split) the remainder is
 * left on the free list in current's old position.
 */
{
   /*
    * If the block is much bigger than required then split it and
    * return a pointer to the allocated section. If the difference
    * between the sizes is marginal it makes no sense to have the
    * extra overhead
    */
   if (current->Size > (1 + size + sizeof(BLOCK_HDR)))
     {
	BLOCK_HDR* free_blk;

	EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;

	/*
	 * Replace the bigger block with a smaller block in the
	 * same position in the list
	 */
	/* The remainder header is placed right after the allocated area */
	free_blk = (BLOCK_HDR *)(((int)current)
				 + sizeof(BLOCK_HDR) + size);
	free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
	/* Inserting after 'current' keeps the free list address-sorted,
	 * since free_blk lies between current and current's old successor */
	InsertHeadList(&current->ListEntry, &free_blk->ListEntry);
	free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);

	current->Size=size;
	/* Move 'current' from the free list to the used list.  The free
	 * block count is deliberately untouched: one free block was
	 * removed and one (free_blk) was added */
	RemoveEntryList(&current->ListEntry);
	InsertHeadList(&UsedBlockListHead, &current->ListEntry);
	EiNrUsedBlocks++;
	current->Magic = BLOCK_HDR_USED_MAGIC;
	current->Tag = Tag;
	current->Caller = Caller;
	current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
	MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

	EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;
	EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;

	VALIDATE_POOL;
	return(block_to_address(current));
     }

   /*
    * Otherwise allocate the whole block
    */
   remove_from_free_list(current);
   add_to_used_list(current);

   EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;
   EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;

   current->Magic = BLOCK_HDR_USED_MAGIC;
   current->Tag = Tag;
   current->Caller = Caller;
   current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

   VALIDATE_POOL;
   return(block_to_address(current));
}
826
827 #endif /* not WHOLE_PAGE_ALLOCATIONS */
828
829 VOID STDCALL ExFreePool (PVOID block)
830 /*
831 * FUNCTION: Releases previously allocated memory
832 * ARGUMENTS:
833 * block = block to free
834 */
835 {
836 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
837 KIRQL oldIrql;
838
839 if (block == NULL)
840 {
841 return;
842 }
843
844 DPRINT("freeing block %x\n",blk);
845
846 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
847 ((PULONG)&block)[-1]);
848
849 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
850
851 ExFreeWholePageBlock(block);
852 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
853
854 #else /* not WHOLE_PAGE_ALLOCATIONS */
855
856 BLOCK_HDR* blk=address_to_block(block);
857 KIRQL oldIrql;
858
859 if (block == NULL)
860 {
861 return;
862 }
863
864 DPRINT("freeing block %x\n",blk);
865
866 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
867 ((PULONG)&block)[-1]);
868
869 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
870
871 VALIDATE_POOL;
872
873 if (blk->Magic != BLOCK_HDR_USED_MAGIC)
874 {
875 if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
876 {
877 DbgPrint("ExFreePool of already freed address %x\n", block);
878 }
879 else
880 {
881 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
882 block, blk->Magic);
883 }
884 KeBugCheck(0);
885 return;
886 }
887
888 memset(block, 0xcc, blk->Size);
889
890 #ifdef TAG_STATISTICS_TRACKING
891 MiRemoveFromTagHashTable(blk);
892 #endif /* TAG_STATISTICS_TRACKING */
893 remove_from_used_list(blk);
894 blk->Magic = BLOCK_HDR_FREE_MAGIC;
895 add_to_free_list(blk);
896 merge_free_block(blk);
897
898 EiUsedNonPagedPool = EiUsedNonPagedPool - blk->Size;
899 EiFreeNonPagedPool = EiFreeNonPagedPool + blk->Size;
900 VALIDATE_POOL;
901 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
902
903 #endif /* WHOLE_PAGE_ALLOCATIONS */
904 }
905
906 PVOID STDCALL
907 ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
908 {
909 #ifdef WHOLE_PAGE_ALLOCATIONS
910 PVOID block;
911 KIRQL oldIrql;
912
913 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
914 Size,Caller);
915
916 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
917
918 /*
919 * accomodate this useful idiom
920 */
921 if (Size == 0)
922 {
923 POOL_TRACE("= NULL\n");
924 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
925 return(NULL);
926 }
927
928 block = ExAllocateWholePageBlock(Size);
929 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
930 return(block);
931
932 #else /* not WHOLE_PAGE_ALLOCATIONS */
933 BLOCK_HDR* current = NULL;
934 PLIST_ENTRY current_entry;
935 PVOID block;
936 BLOCK_HDR* best = NULL;
937 KIRQL oldIrql;
938
939 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
940 Size,Caller);
941
942 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
943
944 VALIDATE_POOL;
945
946 /*
947 * accomodate this useful idiom
948 */
949 if (Size == 0)
950 {
951 POOL_TRACE("= NULL\n");
952 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
953 return(NULL);
954 }
955
956 /*
957 * Look for an already created block of sufficent size
958 */
959 current_entry = FreeBlockListHead.Flink;
960 while (current_entry != &FreeBlockListHead)
961 {
962 DPRINT("current %x size %x tag_next %x\n",current,current->Size,
963 current->tag_next);
964 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
965 if (current->Size >= Size &&
966 (best == NULL || current->Size < best->Size))
967 {
968 best = current;
969 }
970 current_entry = current_entry->Flink;
971 }
972 if (best != NULL)
973 {
974 block=take_block(best, Size, Tag, Caller);
975 VALIDATE_POOL;
976 memset(block,0,Size);
977 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
978 return(block);
979 }
980
981
982 /*
983 * Otherwise create a new block
984 */
985 block=block_to_address(grow_kernel_pool(Size, Tag, Caller));
986 VALIDATE_POOL;
987 memset(block, 0, Size);
988 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
989 return(block);
990 #endif /* WHOLE_PAGE_ALLOCATIONS */
991 }
992
993 #ifdef WHOLE_PAGE_ALLOCATIONS
994
PVOID STDCALL
ExAllocateWholePageBlock(ULONG UserSize)
/*
 * Allocate 'UserSize' bytes positioned flush against the end of a fresh
 * page range, so that any access past the end of the block hits the
 * unmapped page that follows.  The page count is stored in the ULONG
 * immediately before the returned address for ExFreeWholePageBlock.
 */
{
   PVOID Address;
   PVOID Page;
   ULONG i;
   ULONG Size;
   ULONG NrPages;

   /* Reserve room for the hidden page-count header */
   Size = sizeof(ULONG) + UserSize;
   NrPages = ROUND_UP(Size, PAGESIZE) / PAGESIZE;

   /* One extra page is reserved but never mapped: the guard page */
   Address = MiAllocNonPagedPoolRegion(NrPages + 1);

   for (i = 0; i < NrPages; i++)
     {
	Page = MmAllocPage(0);
	if (Page == NULL)
	  {
	     KeBugCheck(0);
	  }
	/* NOTE(review): the return status of MmCreateVirtualMapping is
	 * ignored here -- confirm failure is impossible or add a check */
	MmCreateVirtualMapping(NULL,
			       Address + (i * PAGESIZE),
			       PAGE_READWRITE | PAGE_SYSTEM,
			       (ULONG)Page);
     }

   /* Store the page count just below the block, which itself ends
    * exactly at the end of the mapped range */
   *((PULONG)((ULONG)Address + (NrPages * PAGESIZE) - Size)) = NrPages;
   return((PVOID)((ULONG)Address + (NrPages * PAGESIZE) - UserSize));
}
1025
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr)
/*
 * Release a block allocated by ExAllocateWholePageBlock.  Bugchecks if
 * the address lies outside the pool region.
 */
{
   ULONG NrPages;

   if ((ULONG)Addr < kernel_pool_base ||
       (ULONG)Addr >= (kernel_pool_base + NONPAGED_POOL_SIZE))
     {
	DbgPrint("Block %x found outside pool area\n", Addr);
	KeBugCheck(0);
     }
   /* The allocator stored the mapped page count directly below Addr */
   NrPages = *(PULONG)((ULONG)Addr - sizeof(ULONG));
   /* NOTE(review): the allocator reserved NrPages + 1 pages (the last an
    * unmapped guard) but only NrPages are released here -- verify the
    * guard page's region is reclaimed and not leaked */
   MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG)Addr), NrPages, TRUE);
}
1040
1041 #endif /* WHOLE_PAGE_ALLOCATIONS */
1042
1043 /* EOF */