/* $Id: npool.c,v 1.64 2002/11/10 18:17:42 chorns Exp $
 *
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/npool.c
 * PURPOSE:     Implements the kernel memory pool
 * PROGRAMMER:  David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *              27/05/98: Created
 *              10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
 *                        in take_block (if current bigger than required)
 *                        in remove_from_used_list
 *                        in ExFreePool
 *              23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
 */

/* INCLUDES ****************************************************************/

#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/ntoskrnl.h>
#include <internal/pool.h>

#define NDEBUG
#include <internal/debug.h>

/* Enable strict checking of the nonpaged pool on every allocation */
//#define ENABLE_VALIDATE_POOL

/* Enable tracking of statistics about the tagged blocks in the pool */
#define TAG_STATISTICS_TRACKING

/*
 * Put each block in its own range of pages and position the block at the
 * end of the range so that any access beyond the end of the block hits an
 * invalid memory location.
 */
//#define WHOLE_PAGE_ALLOCATIONS

#ifdef ENABLE_VALIDATE_POOL
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
/* No trailing semicolon, so POOL_TRACE(...) behaves like a statement */
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0)
#else
#define POOL_TRACE(args...)
#endif

/* TYPES *******************************************************************/

#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
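/*
 * ExFreeNonPagedPool uses these magic values to distinguish a double
 * free (the header still carries BLOCK_HDR_FREE_MAGIC) from the release
 * of an address that was never allocated from this pool.
 */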

/*
 * Fields present at the start of a block (this is for internal use only)
 */
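/*
 * Magic     - BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC; checked to
 *             detect pool corruption
 * Size      - size in bytes of the data area that follows this header
 * ListEntry - links the block into either the free or the used list
 * Tag       - pool tag supplied by the caller (0 if untagged)
 * Caller    - address of the routine that allocated the block
 * tag_next  - next block in the same tag hash chain
 * Dumped    - TRUE once the block has been reported by a debug dump
 */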
typedef struct _BLOCK_HDR
{
  ULONG Magic;
  ULONG Size;
  LIST_ENTRY ListEntry;
  ULONG Tag;
  PVOID Caller;
  struct _BLOCK_HDR* tag_next;
  BOOLEAN Dumped;
} BLOCK_HDR;

PVOID STDCALL
ExAllocateWholePageBlock(ULONG Size);
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr);

/* GLOBALS *****************************************************************/

/*
 * Base of the nonpaged pool, initialized by memory management at startup
 */
static unsigned int kernel_pool_base = 0;

/*
 * Head of the list of free blocks
 */
static LIST_ENTRY FreeBlockListHead;

/*
 * Head of the list of in-use blocks
 */
static LIST_ENTRY UsedBlockListHead;

#ifndef WHOLE_PAGE_ALLOCATIONS
/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;
#endif

/*
 * Lock that protects the nonpaged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory held in free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory held in used nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/*
 * Allocate a range of memory in the nonpaged pool
 */
PVOID
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

VOID
MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */
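/*
 * Since TAG_HASH_TABLE_SIZE is a power of two, Tag % TAG_HASH_TABLE_SIZE
 * reduces to the low ten bits of the tag; for example the tag 'Usbd'
 * (0x64627355 read as a little-endian ULONG) hashes to bucket 0x355.
 * Blocks that share a tag are kept adjacent within a chain (see
 * MiAddToTagHashTable below), which lets the statistics dump group them
 * in a single pass.
 */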

/* FUNCTIONS ***************************************************************/

#ifdef TAG_STATISTICS_TRACKING
VOID
MiRemoveFromTagHashTable(BLOCK_HDR* block)
/*
 * Remove a block from the tag hash table
 */
{
  BLOCK_HDR* previous;
  BLOCK_HDR* current;
  ULONG hash;

  if (block->Tag == 0)
    {
      return;
    }

  hash = block->Tag % TAG_HASH_TABLE_SIZE;

  previous = NULL;
  current = tag_hash_table[hash];
  while (current != NULL)
    {
      if (current == block)
        {
          if (previous == NULL)
            {
              tag_hash_table[hash] = block->tag_next;
            }
          else
            {
              previous->tag_next = block->tag_next;
            }
          return;
        }
      previous = current;
      current = current->tag_next;
    }
  DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
          block->Tag, block->Caller);
  KeBugCheck(0);
}

VOID
MiAddToTagHashTable(BLOCK_HDR* block)
/*
 * Add a block to the tag hash table
 */
{
  ULONG hash;
  BLOCK_HDR* current;
  BLOCK_HDR* previous;

  if (block->Tag == 0)
    {
      return;
    }

  hash = block->Tag % TAG_HASH_TABLE_SIZE;

  previous = NULL;
  current = tag_hash_table[hash];
  while (current != NULL)
    {
      if (current->Tag == block->Tag)
        {
          /* Keep blocks with equal tags adjacent in the chain */
          block->tag_next = current->tag_next;
          current->tag_next = block;
          return;
        }
      previous = current;
      /* Sanity check: a chain pointer outside the pool means corruption */
      if (current->tag_next != NULL &&
          ((ULONG)current->tag_next >= kernel_pool_base + NONPAGED_POOL_SIZE ||
           (ULONG)current->tag_next < kernel_pool_base))
        {
          DbgPrint("previous %x\n", previous);
        }
      current = current->tag_next;
    }
  block->tag_next = NULL;
  if (previous == NULL)
    {
      tag_hash_table[hash] = block;
    }
  else
    {
      previous->tag_next = block;
    }
}
#endif /* TAG_STATISTICS_TRACKING */

VOID
ExInitNonPagedPool(ULONG BaseAddress)
{
  kernel_pool_base = BaseAddress;
  KeInitializeSpinLock(&MmNpoolLock);
  MmInitKernelMap((PVOID)BaseAddress);
  memset(tag_hash_table, 0, sizeof(tag_hash_table));
  InitializeListHead(&FreeBlockListHead);
  InitializeListHead(&UsedBlockListHead);
}
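/*
 * NOTE: expected to be called exactly once, during memory manager
 * startup, before the first pool allocation; BaseAddress is the start
 * of the NONPAGED_POOL_SIZE byte virtual region reserved for the pool.
 */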

#ifdef TAG_STATISTICS_TRACKING
VOID STATIC
MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
{
  CHAR c1, c2, c3, c4;

  c1 = (CurrentTag >> 24) & 0xFF;
  c2 = (CurrentTag >> 16) & 0xFF;
  c3 = (CurrentTag >> 8) & 0xFF;
  c4 = CurrentTag & 0xFF;

  if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
    {
      DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
               CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
               CurrentSize, CurrentSize / CurrentNrBlocks);
    }
  else
    {
      DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
               CurrentTag, CurrentNrBlocks, CurrentSize,
               CurrentSize / CurrentNrBlocks);
    }
}
#endif /* TAG_STATISTICS_TRACKING */

VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#ifdef TAG_STATISTICS_TRACKING
  ULONG i;
  BLOCK_HDR* current;
  ULONG CurrentTag;
  ULONG CurrentNrBlocks;
  ULONG CurrentSize;
  ULONG TotalBlocks;
  ULONG TotalSize;

  DbgPrint("******* Dumping nonpaged pool stats ******\n");
  TotalBlocks = 0;
  TotalSize = 0;
  for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
    {
      CurrentTag = 0;
      CurrentNrBlocks = 0;
      CurrentSize = 0;
      current = tag_hash_table[i];
      while (current != NULL)
        {
          if (current->Tag != CurrentTag)
            {
              if (CurrentTag != 0 && CurrentNrBlocks != 0)
                {
                  MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
                }
              CurrentTag = current->Tag;
              CurrentNrBlocks = 0;
              CurrentSize = 0;
            }

          if (!NewOnly || !current->Dumped)
            {
              CurrentNrBlocks++;
              TotalBlocks++;
              CurrentSize = CurrentSize + current->Size;
              TotalSize = TotalSize + current->Size;
              current->Dumped = TRUE;
            }
          current = current->tag_next;
        }
      if (CurrentTag != 0 && CurrentNrBlocks != 0)
        {
          MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
        }
    }
  if (TotalBlocks != 0)
    {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
    }
  else
    {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
    }
  DbgPrint("***************** Dump Complete ***************\n");
#endif /* TAG_STATISTICS_TRACKING */
}

VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;
  KIRQL oldIrql;

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  DbgPrint("******* Dumping nonpaged pool contents ******\n");
  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      if (!NewOnly || !current->Dumped)
        {
          CHAR c1, c2, c3, c4;

          c1 = (current->Tag >> 24) & 0xFF;
          c2 = (current->Tag >> 16) & 0xFF;
          c3 = (current->Tag >> 8) & 0xFF;
          c4 = current->Tag & 0xFF;

          if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
            {
              DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                       current->Size, current->Tag, c4, c3, c2, c1,
                       current->Caller);
            }
          else
            {
              DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                       current->Size, current->Tag, current->Caller);
            }
          current->Dumped = TRUE;
        }
      current_entry = current_entry->Flink;
    }
  DbgPrint("***************** Dump Complete ***************\n");
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}

#ifndef WHOLE_PAGE_ALLOCATIONS

#ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 */
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;
  unsigned int blocks_seen = 0;

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      unsigned int base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      base_addr = (int)current;

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
        {
          DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                   current);
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }

      if (base_addr < (kernel_pool_base) ||
          (base_addr + current->Size) > (kernel_pool_base) + NONPAGED_POOL_SIZE)
        {
          DbgPrint("Block %x found outside pool area\n", current);
          DbgPrint("Size %d\n", current->Size);
          DbgPrint("Limits are %x %x\n", kernel_pool_base,
                   kernel_pool_base + NONPAGED_POOL_SIZE);
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }
      blocks_seen++;
      if (blocks_seen > EiNrFreeBlocks)
        {
          DbgPrint("Too many blocks on free list\n");
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }
      if (current->ListEntry.Flink != &FreeBlockListHead &&
          current->ListEntry.Flink->Blink != &current->ListEntry)
        {
          DbgPrint("%s:%d:Break in list (current %x next %x "
                   "current->next->previous %x)\n",
                   __FILE__, __LINE__, current, current->ListEntry.Flink,
                   current->ListEntry.Flink->Blink);
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }

      current_entry = current_entry->Flink;
    }
}

static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 */
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;
  unsigned int blocks_seen = 0;

  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      unsigned int base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      base_addr = (int)current;

      if (current->Magic != BLOCK_HDR_USED_MAGIC)
        {
          DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                   current);
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }
      if (base_addr < (kernel_pool_base) ||
          (base_addr + current->Size) >
          (kernel_pool_base) + NONPAGED_POOL_SIZE)
        {
          DbgPrint("Block %x found outside pool area\n", current);
          for (;;);
        }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
        {
          DbgPrint("Too many blocks on used list\n");
          for (;;);
        }
      if (current->ListEntry.Flink != &UsedBlockListHead &&
          current->ListEntry.Flink->Blink != &current->ListEntry)
        {
          DbgPrint("Break in list (current %x next %x)\n",
                   current, current->ListEntry.Flink);
          for (;;);
        }

      current_entry = current_entry->Flink;
    }
}

static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if duplicates are found
 */
{
  unsigned int base = (int)blk;
  unsigned int last = ((int)blk) + sizeof(BLOCK_HDR) + blk->Size;
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
        {
          DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                   current);
          KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
        }

      if ((int)current > base && (int)current < last)
        {
          DbgPrint("intersecting blocks on list\n");
          for (;;);
        }
      if ((int)current < base &&
          ((int)current + current->Size + sizeof(BLOCK_HDR)) > base)
        {
          DbgPrint("intersecting blocks on list\n");
          for (;;);
        }

      current_entry = current_entry->Flink;
    }

  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if ((int)current > base && (int)current < last)
        {
          DbgPrint("intersecting blocks on list\n");
          for (;;);
        }
      if ((int)current < base &&
          ((int)current + current->Size + sizeof(BLOCK_HDR)) > base)
        {
          DbgPrint("intersecting blocks on list\n");
          for (;;);
        }

      current_entry = current_entry->Flink;
    }
}

static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 */
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;

  validate_free_list();
  validate_used_list();

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      check_duplicates(current);
      current_entry = current_entry->Flink;
    }
  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      check_duplicates(current);
      current_entry = current_entry->Flink;
    }
}
#endif

#if 0
STATIC VOID
free_pages(BLOCK_HDR* blk)
{
  ULONG start;
  ULONG end;
  ULONG i;

  start = (ULONG)blk;
  end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

  /*
   * If the block doesn't contain a whole page then there is nothing to do
   */
  if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
    {
      return;
    }
}
#endif

STATIC VOID
merge_free_block(BLOCK_HDR* blk)
{
  PLIST_ENTRY next_entry;
  BLOCK_HDR* next;
  PLIST_ENTRY previous_entry;
  BLOCK_HDR* previous;

  next_entry = blk->ListEntry.Flink;
  if (next_entry != &FreeBlockListHead)
    {
      next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
      if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
          (unsigned int)next)
        {
          RemoveEntryList(&next->ListEntry);
          /* The absorbed header becomes part of the merged block's data
             area, hence the sizeof(BLOCK_HDR) added here and credited to
             EiFreeNonPagedPool below */
          blk->Size = blk->Size + sizeof(BLOCK_HDR) + next->Size;
          memset(next, 0xcc, sizeof(BLOCK_HDR));
          EiFreeNonPagedPool += sizeof(BLOCK_HDR);
          EiNrFreeBlocks--;
        }
    }

  previous_entry = blk->ListEntry.Blink;
  if (previous_entry != &FreeBlockListHead)
    {
      previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
      if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
          (unsigned int)blk)
        {
          RemoveEntryList(&blk->ListEntry);
          previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
          memset(blk, 0xcc, sizeof(BLOCK_HDR));
          EiFreeNonPagedPool += sizeof(BLOCK_HDR);
          EiNrFreeBlocks--;
        }
    }
}

STATIC VOID
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 */
{
  PLIST_ENTRY current_entry;
  BLOCK_HDR* current;

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if ((unsigned int)current > (unsigned int)blk)
        {
          blk->ListEntry.Flink = current_entry;
          blk->ListEntry.Blink = current_entry->Blink;
          current_entry->Blink->Flink = &blk->ListEntry;
          current_entry->Blink = &blk->ListEntry;
          EiFreeNonPagedPool += blk->Size;
          EiNrFreeBlocks++;
          return;
        }

      current_entry = current_entry->Flink;
    }
  InsertTailList(&FreeBlockListHead, &blk->ListEntry);
  EiFreeNonPagedPool += blk->Size;
  EiNrFreeBlocks++;
}
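/*
 * The free list is kept sorted by address; this is what allows
 * merge_free_block above to find physically adjacent free blocks by
 * looking only at a block's list neighbours.
 */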

static void add_to_used_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the used list (internal)
 */
{
  InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
  EiUsedNonPagedPool += blk->Size;
  EiNrUsedBlocks++;
}


static void remove_from_free_list(BLOCK_HDR* current)
{
  RemoveEntryList(&current->ListEntry);
  EiFreeNonPagedPool -= current->Size;
  EiNrFreeBlocks--;
}


static void remove_from_used_list(BLOCK_HDR* current)
{
  RemoveEntryList(&current->ListEntry);
  EiUsedNonPagedPool -= current->Size;
  EiNrUsedBlocks--;
}


inline static void* block_to_address(BLOCK_HDR* blk)
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
  return ((void *)((int)blk + sizeof(BLOCK_HDR)));
}

inline static BLOCK_HDR* address_to_block(void* addr)
{
  return (BLOCK_HDR *)(((int)addr) - sizeof(BLOCK_HDR));
}
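/*
 * Example: with the typical 32 byte x86 BLOCK_HDR above, a header at
 * 0xD0000000 corresponds to the user address 0xD0000020 and vice versa.
 */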

static BLOCK_HDR* lookup_block(unsigned int size)
{
  PLIST_ENTRY current_entry;
  BLOCK_HDR* current;
  BLOCK_HDR* best = NULL;
  ULONG new_size;
  PVOID block, block_boundary;

  current_entry = FreeBlockListHead.Flink;
  if (size < PAGE_SIZE)
    {
      while (current_entry != &FreeBlockListHead)
        {
          current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
          DPRINT("current %x size %x tag_next %x\n",
                 current, current->Size, current->tag_next);
          if (current->Size >= size &&
              (best == NULL || current->Size < best->Size))
            {
              best = current;
            }
          current_entry = current_entry->Flink;
        }
    }
  else
    {
      while (current_entry != &FreeBlockListHead)
        {
          current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
          DPRINT("current %x size %x tag_next %x\n",
                 current, current->Size, current->tag_next);

          block = block_to_address(current);
          block_boundary = (PVOID)PAGE_ROUND_UP((ULONG)block);
          new_size = (ULONG)block_boundary - (ULONG)block + size;
          if (new_size != size &&
              (ULONG)block_boundary - (ULONG)block < sizeof(BLOCK_HDR))
            {
              new_size += PAGE_SIZE;
            }
          if (current->Size >= new_size &&
              (best == NULL || current->Size < best->Size))
            {
              best = current;
            }
          current_entry = current_entry->Flink;
        }
    }
  return best;
}
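/*
 * Fit test example for the large-allocation path above: if a free
 * block's data area starts at 0xD0000FF0 with a 0x1000 byte PAGE_SIZE,
 * the next page boundary is 0xD0001000, a gap of 0x10 bytes.  Since
 * 0x10 bytes cannot hold the BLOCK_HDR that take_block will place in
 * front of the aligned block, a further PAGE_SIZE is added to the
 * required size.
 */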

static void* take_block(BLOCK_HDR* current, unsigned int size,
                        ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocate a used block of at least 'size' from the specified
 * free block
 * RETURNS: The address of the created memory block
 */
{
  BLOCK_HDR* blk;

  if (size >= PAGE_SIZE)
    {
      /* Shift the block so its data area starts on a page boundary */
      blk = address_to_block((PVOID)PAGE_ROUND_UP(block_to_address(current)));
      if (blk != current)
        {
          if ((ULONG)blk - (ULONG)current < sizeof(BLOCK_HDR))
            {
              blk = (BLOCK_HDR*)((ULONG)blk + PAGE_SIZE);
            }
          assert((ULONG)blk - (ULONG)current + size <= current->Size &&
                 (ULONG)blk - (ULONG)current >= sizeof(BLOCK_HDR));

          memset(blk, 0, sizeof(BLOCK_HDR));
          blk->Magic = BLOCK_HDR_FREE_MAGIC;
          blk->Size = current->Size - ((ULONG)blk - (ULONG)current);
          current->Size -= (blk->Size + sizeof(BLOCK_HDR));
          InsertHeadList(&current->ListEntry, &blk->ListEntry);
          EiFreeNonPagedPool -= sizeof(BLOCK_HDR);
          EiNrFreeBlocks++;
          current = blk;
        }
    }
  /*
   * If the block is much bigger than required then split it and
   * return a pointer to the allocated section. If the difference
   * between the sizes is marginal it makes no sense to have the
   * extra overhead.
   */
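  /*
   * Split layout:
   *   before:  [ current hdr | current->Size bytes ...................... ]
   *   after:   [ current hdr | size bytes ][ free_blk hdr | remaining data ]
   */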
  if (current->Size > size + sizeof(BLOCK_HDR))
    {
      BLOCK_HDR* free_blk;

      EiFreeNonPagedPool -= current->Size;

      /*
       * Replace the bigger block with a smaller block in the
       * same position in the list
       */
      free_blk = (BLOCK_HDR *)(((int)current) + sizeof(BLOCK_HDR) + size);
      free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
      InsertHeadList(&current->ListEntry, &free_blk->ListEntry);
      free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);

      current->Size = size;
      RemoveEntryList(&current->ListEntry);
      add_to_used_list(current);
      current->Magic = BLOCK_HDR_USED_MAGIC;
      current->Tag = Tag;
      current->Caller = Caller;
      current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
      MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

      EiFreeNonPagedPool += free_blk->Size;

      VALIDATE_POOL;
      return(block_to_address(current));
    }

  /*
   * Otherwise allocate the whole block
   */
  remove_from_free_list(current);
  add_to_used_list(current);

  current->Magic = BLOCK_HDR_USED_MAGIC;
  current->Tag = Tag;
  current->Caller = Caller;
  current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
  MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

  VALIDATE_POOL;
  return(block_to_address(current));
}

static void* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Grow the executive heap to accommodate a block of at least
 * 'size' bytes
 */
{
  ULONG nr_pages = PAGE_ROUND_UP(size + sizeof(BLOCK_HDR)) / PAGE_SIZE;
  ULONG start;
  BLOCK_HDR* blk = NULL;
  ULONG i;
  KIRQL oldIrql;
  NTSTATUS Status;
  PVOID block = NULL;

  if (size >= PAGE_SIZE)
    {
      /* Reserve room for take_block to shift the block to a page boundary */
      nr_pages++;
    }
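  /*
   * e.g. with 4Kb pages a 0x2000 byte request needs
   * PAGE_ROUND_UP(0x2000 + sizeof(BLOCK_HDR)) / PAGE_SIZE = 3 pages,
   * plus the extra alignment page, so 4 pages are mapped.
   */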

  start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);

  DPRINT("growing heap for block size %d, ", size);
  DPRINT("start %x\n", start);

  for (i = 0; i < nr_pages; i++)
    {
      PHYSICAL_ADDRESS Page;
      /* FIXME: Check whether we can really wait here. */
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
      if (!NT_SUCCESS(Status))
        {
          KeBugCheck(0);
          return(NULL);
        }
      Status = MmCreateVirtualMapping(NULL,
                                      (PVOID)(start + (i * PAGE_SIZE)),
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      Page,
                                      FALSE);
      if (!NT_SUCCESS(Status))
        {
          DbgPrint("Unable to create virtual mapping\n");
          KeBugCheck(0);
        }
    }

  blk = (BLOCK_HDR *)start;
  memset(blk, 0, sizeof(BLOCK_HDR));
  blk->Size = (nr_pages * PAGE_SIZE) - sizeof(BLOCK_HDR);
  blk->Magic = BLOCK_HDR_FREE_MAGIC;
  memset(block_to_address(blk), 0xcc, blk->Size);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
  add_to_free_list(blk);
  merge_free_block(blk);

  blk = lookup_block(size);
  if (blk)
    {
      block = take_block(blk, size, Tag, Caller);
      VALIDATE_POOL;
    }
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
  if (block == NULL)
    {
      CHECKPOINT1;
    }
  return block;
}

#endif /* not WHOLE_PAGE_ALLOCATIONS */

VOID STDCALL ExFreeNonPagedPool (PVOID block)
/*
 * FUNCTION: Releases previously allocated memory
 * ARGUMENTS:
 *        block = block to free
 */
{
#ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
  KIRQL oldIrql;

  if (block == NULL)
    {
      return;
    }

  DPRINT("freeing block %x\n", block);

  POOL_TRACE("ExFreePool(block %x), caller %x\n", block,
             ((PULONG)&block)[-1]);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  ExFreeWholePageBlock(block);
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);

#else /* not WHOLE_PAGE_ALLOCATIONS */

  BLOCK_HDR* blk = address_to_block(block);
  KIRQL oldIrql;

  if (block == NULL)
    {
      return;
    }

  DPRINT("freeing block %x\n", blk);

  POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n", block, blk->Size,
             ((PULONG)&block)[-1]);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  VALIDATE_POOL;

  if (blk->Magic != BLOCK_HDR_USED_MAGIC)
    {
      if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
        {
          DbgPrint("ExFreePool of already freed address %x\n", block);
        }
      else
        {
          DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
                   block, blk->Magic);
        }
      KeBugCheck(0);
      return;
    }

  memset(block, 0xcc, blk->Size);

#ifdef TAG_STATISTICS_TRACKING
  MiRemoveFromTagHashTable(blk);
#endif /* TAG_STATISTICS_TRACKING */
  remove_from_used_list(blk);
  blk->Magic = BLOCK_HDR_FREE_MAGIC;
  blk->Tag = 0;
  blk->Caller = NULL;
  blk->tag_next = NULL;
  add_to_free_list(blk);
  merge_free_block(blk);

  VALIDATE_POOL;
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);

#endif /* WHOLE_PAGE_ALLOCATIONS */
}

PVOID STDCALL
ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
{
#ifdef WHOLE_PAGE_ALLOCATIONS
  PVOID block;
  KIRQL oldIrql;

  POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
             Size, Caller);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  /*
   * Accommodate the useful idiom of allocating zero bytes
   */
  if (Size == 0)
    {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
    }

  block = ExAllocateWholePageBlock(Size);
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
  return(block);

#else /* not WHOLE_PAGE_ALLOCATIONS */
  PVOID block;
  BLOCK_HDR* best = NULL;
  KIRQL oldIrql;

  POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
             Size, Caller);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  VALIDATE_POOL;

#if 0
  /* after some allocations print the npaged pool stats */
#ifdef TAG_STATISTICS_TRACKING
  {
    static ULONG counter = 0;
    if (counter++ % 100000 == 0)
      {
        MiDebugDumpNonPagedPoolStats(FALSE);
      }
  }
#endif
#endif
  /*
   * Accommodate the useful idiom of allocating zero bytes
   */
  if (Size == 0)
    {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
    }
  /* Make the size dword aligned; this makes the block dword aligned */
  Size = ROUND_UP(Size, 4);
  /*
   * Look for an already created block of sufficient size
   */
  best = lookup_block(Size);
  if (best == NULL)
    {
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      block = grow_kernel_pool(Size, Tag, Caller);
      assert(block != NULL);
      memset(block, 0, Size);
    }
  else
    {
      block = take_block(best, Size, Tag, Caller);
      VALIDATE_POOL;
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      memset(block, 0, Size);
    }
  return(block);
#endif /* WHOLE_PAGE_ALLOCATIONS */
}
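/*
 * NOTE: callers are expected to reach this routine through the
 * ExAllocatePool* wrappers, which supply their own return address as
 * 'Caller' so the tag statistics can identify the allocating component.
 * A hypothetical direct call (tag 'Usbd' shown as a raw ULONG) would
 * look like:
 *
 *   PVOID Buffer = ExAllocateNonPagedPoolWithTag(NonPagedPool, 128,
 *                                       0x64627355,
 *                                       __builtin_return_address(0));
 *   ...
 *   ExFreeNonPagedPool(Buffer);
 */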

#ifdef WHOLE_PAGE_ALLOCATIONS

PVOID STDCALL
ExAllocateWholePageBlock(ULONG UserSize)
{
  PVOID Address;
  PHYSICAL_ADDRESS Page;
  ULONG i;
  ULONG Size;
  ULONG NrPages;

  Size = sizeof(ULONG) + UserSize;
  NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;

  /* Reserve one extra, unmapped page as a guard page */
  Address = MiAllocNonPagedPoolRegion(NrPages + 1);

  for (i = 0; i < NrPages; i++)
    {
      Page = MmAllocPage(MC_NPPOOL, 0);
      if (Page.QuadPart == 0LL)
        {
          KeBugCheck(0);
        }
      MmCreateVirtualMapping(NULL,
                             (PVOID)((ULONG)Address + (i * PAGE_SIZE)),
                             PAGE_READWRITE | PAGE_SYSTEM,
                             Page,
                             TRUE);
    }

  /* Store the page count just below the user block, then hand out an
     address such that the block ends exactly at the end of the mapped
     region */
  *((PULONG)((ULONG)Address + (NrPages * PAGE_SIZE) - Size)) = NrPages;
  return((PVOID)((ULONG)Address + (NrPages * PAGE_SIZE) - UserSize));
}
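/*
 * Resulting layout (Size = sizeof(ULONG) + UserSize):
 *
 *   Address ... [ unused | NrPages | UserSize bytes ] [ unmapped page ]
 *                                   ^returned pointer
 *
 * The user block is flush against the end of the last mapped page, so
 * any overrun faults immediately on the unmapped guard page.
 */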

VOID STDCALL
ExFreeWholePageBlock(PVOID Addr)
{
  ULONG NrPages;

  if ((ULONG)Addr < kernel_pool_base ||
      (ULONG)Addr >= (kernel_pool_base + NONPAGED_POOL_SIZE))
    {
      DbgPrint("Block %x found outside pool area\n", Addr);
      KeBugCheck(0);
    }
  NrPages = *(PULONG)((ULONG)Addr - sizeof(ULONG));
  MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG)Addr), NrPages, TRUE);
}

#endif /* WHOLE_PAGE_ALLOCATIONS */

/* EOF */