/* $Id: npool.c,v 1.60 2002/09/07 15:13:00 chorns Exp $
 *
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/npool.c
 * PURPOSE:     Implements the kernel memory pool
 * PROGRAMMER:  David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *              27/05/98: Created
 *              10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
 *                        in take_block (if current bigger than required)
 *                        in remove_from_used_list
 *                        in ExFreePool
 *              23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
 */

/* INCLUDES ****************************************************************/

#include <ntoskrnl.h>

#define NDEBUG
#include <internal/debug.h>

/* Enable strict checking of the nonpaged pool on every allocation */
//#define ENABLE_VALIDATE_POOL

/* Enable tracking of statistics about the tagged blocks in the pool */
#define TAG_STATISTICS_TRACKING

/*
 * Put each block in its own range of pages and position the block at the
 * end of the range so that any access beyond the end of the block hits an
 * invalid memory location.
 */
//#define WHOLE_PAGE_ALLOCATIONS

#ifdef ENABLE_VALIDATE_POOL
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
/* No trailing semicolon here, so the do/while(0) idiom stays safe in if/else */
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0)
#else
#define POOL_TRACE(args...)
#endif

/* TYPES *******************************************************************/

#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)

/*
 * Fields present at the start of a block (this is for internal use only)
 */
typedef struct _BLOCK_HDR
{
  ULONG Magic;
  ULONG Size;
  LIST_ENTRY ListEntry;
  ULONG Tag;
  PVOID Caller;
  struct _BLOCK_HDR* tag_next;
  BOOLEAN Dumped;
} BLOCK_HDR;
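
/*
 * Layout sketch: every allocation is a BLOCK_HDR immediately followed by
 * its payload, so a block whose header sits at address B occupies
 * [B, B + sizeof(BLOCK_HDR) + Size):
 *
 *   +-----------+--------------------------+
 *   | BLOCK_HDR | payload (Size bytes)     |
 *   +-----------+--------------------------+
 *   ^ header    ^ address handed to the caller
 *
 * block_to_address() and address_to_block() below convert between the
 * header and the payload address.
 */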

PVOID STDCALL
ExAllocateWholePageBlock(ULONG Size);
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr);

/* GLOBALS *****************************************************************/

/*
 * Base address of the nonpaged pool, set by memory management at
 * initialization
 */
static unsigned int kernel_pool_base = 0;

/*
 * Head of the list of free blocks
 */
static LIST_ENTRY FreeBlockListHead;

/*
 * Head of the list of in-use blocks
 */
static LIST_ENTRY UsedBlockListHead;

#ifndef WHOLE_PAGE_ALLOCATIONS
/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;
#endif

/*
 * Lock that protects the nonpaged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/*
 * Allocate a range of memory in the nonpaged pool
 */
PVOID
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

VOID
MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */

/* FUNCTIONS ***************************************************************/

#ifdef TAG_STATISTICS_TRACKING
VOID
MiRemoveFromTagHashTable(BLOCK_HDR* block)
/*
 * Remove a block from the tag hash table
 */
{
  BLOCK_HDR* previous;
  BLOCK_HDR* current;
  ULONG hash;

  if (block->Tag == 0)
    {
      return;
    }

  hash = block->Tag % TAG_HASH_TABLE_SIZE;

  previous = NULL;
  current = tag_hash_table[hash];
  while (current != NULL)
    {
      if (current == block)
        {
          if (previous == NULL)
            {
              tag_hash_table[hash] = block->tag_next;
            }
          else
            {
              previous->tag_next = block->tag_next;
            }
          return;
        }
      previous = current;
      current = current->tag_next;
    }
  DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
          block->Tag, block->Caller);
  KeBugCheck(0);
}

VOID
MiAddToTagHashTable(BLOCK_HDR* block)
/*
 * Add a block to the tag hash table
 */
{
  ULONG hash;
  BLOCK_HDR* current;
  BLOCK_HDR* previous;

  if (block->Tag == 0)
    {
      return;
    }

  hash = block->Tag % TAG_HASH_TABLE_SIZE;

  previous = NULL;
  current = tag_hash_table[hash];
  while (current != NULL)
    {
      if (current->Tag == block->Tag)
        {
          block->tag_next = current->tag_next;
          current->tag_next = block;
          return;
        }
      previous = current;
      current = current->tag_next;
    }
  block->tag_next = NULL;
  if (previous == NULL)
    {
      tag_hash_table[hash] = block;
    }
  else
    {
      previous->tag_next = block;
    }
}
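
/*
 * Hashing example (illustrative): a tag is bucketed by taking it modulo
 * TAG_HASH_TABLE_SIZE, i.e. by its low ten bits.  For a hypothetical tag
 * 'Xmpl' (0x6C706D58 when the four characters are read as a little-endian
 * ULONG), the bucket is 0x6C706D58 % 1024 = 344.  Note that
 * MiAddToTagHashTable() above inserts a block directly behind the first
 * block carrying the same tag, so equal tags stay adjacent in a bucket's
 * chain and the statistics dump can walk them as one group.
 */
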
#endif /* TAG_STATISTICS_TRACKING */

VOID
ExInitNonPagedPool(ULONG BaseAddress)
{
  kernel_pool_base = BaseAddress;
  KeInitializeSpinLock(&MmNpoolLock);
  MmInitKernelMap((PVOID)BaseAddress);
#ifdef TAG_STATISTICS_TRACKING
  memset(tag_hash_table, 0, sizeof(tag_hash_table));
#endif /* TAG_STATISTICS_TRACKING */
  InitializeListHead(&FreeBlockListHead);
  InitializeListHead(&UsedBlockListHead);
}

#ifdef TAG_STATISTICS_TRACKING
VOID STATIC
MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
{
  CHAR c1, c2, c3, c4;

  c1 = (CurrentTag >> 24) & 0xFF;
  c2 = (CurrentTag >> 16) & 0xFF;
  c3 = (CurrentTag >> 8) & 0xFF;
  c4 = CurrentTag & 0xFF;

  if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
    {
      DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
               CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
               CurrentSize, CurrentSize / CurrentNrBlocks);
    }
  else
    {
      DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
               CurrentTag, CurrentNrBlocks, CurrentSize,
               CurrentSize / CurrentNrBlocks);
    }
}
#endif /* TAG_STATISTICS_TRACKING */

VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#ifdef TAG_STATISTICS_TRACKING
  ULONG i;
  BLOCK_HDR* current;
  ULONG CurrentTag;
  ULONG CurrentNrBlocks;
  ULONG CurrentSize;
  ULONG TotalBlocks;
  ULONG TotalSize;

  DbgPrint("******* Dumping nonpaged pool stats ******\n");
  TotalBlocks = 0;
  TotalSize = 0;
  for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
    {
      CurrentTag = 0;
      CurrentNrBlocks = 0;
      CurrentSize = 0;
      current = tag_hash_table[i];
      while (current != NULL)
        {
          if (current->Tag != CurrentTag)
            {
              if (CurrentTag != 0 && CurrentNrBlocks != 0)
                {
                  MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
                }
              CurrentTag = current->Tag;
              CurrentNrBlocks = 0;
              CurrentSize = 0;
            }

          if (!NewOnly || !current->Dumped)
            {
              CurrentNrBlocks++;
              TotalBlocks++;
              CurrentSize = CurrentSize + current->Size;
              TotalSize = TotalSize + current->Size;
              current->Dumped = TRUE;
            }
          current = current->tag_next;
        }
      if (CurrentTag != 0 && CurrentNrBlocks != 0)
        {
          MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
        }
    }
  if (TotalBlocks != 0)
    {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
    }
  else
    {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
    }
  DbgPrint("***************** Dump Complete ***************\n");
#endif /* TAG_STATISTICS_TRACKING */
}

VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;
  KIRQL oldIrql;

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  DbgPrint("******* Dumping nonpaged pool contents ******\n");
  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      if (!NewOnly || !current->Dumped)
        {
          CHAR c1, c2, c3, c4;

          c1 = (current->Tag >> 24) & 0xFF;
          c2 = (current->Tag >> 16) & 0xFF;
          c3 = (current->Tag >> 8) & 0xFF;
          c4 = current->Tag & 0xFF;

          if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
            {
              DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                       current->Size, current->Tag, c4, c3, c2, c1,
                       current->Caller);
            }
          else
            {
              DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                       current->Size, current->Tag, current->Caller);
            }
          current->Dumped = TRUE;
        }
      current_entry = current_entry->Flink;
    }
  DbgPrint("***************** Dump Complete ***************\n");
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}

#ifndef WHOLE_PAGE_ALLOCATIONS

#ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 */
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;
  unsigned int blocks_seen = 0;

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      unsigned int base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      base_addr = (int)current;

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
        {
          DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                   current);
          KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
        }

      if (base_addr < (kernel_pool_base) ||
          (base_addr + current->Size) > (kernel_pool_base) + NONPAGED_POOL_SIZE)
        {
          DbgPrint("Block %x found outside pool area\n", current);
          DbgPrint("Size %d\n", current->Size);
          DbgPrint("Limits are %x %x\n", kernel_pool_base,
                   kernel_pool_base + NONPAGED_POOL_SIZE);
          KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
        }
      blocks_seen++;
      if (blocks_seen > EiNrFreeBlocks)
        {
          DbgPrint("Too many blocks on free list\n");
          KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
        }
      if (current->ListEntry.Flink != &FreeBlockListHead &&
          current->ListEntry.Flink->Blink != &current->ListEntry)
        {
          DbgPrint("%s:%d:Break in list (current %x next %x "
                   "current->next->previous %x)\n",
                   __FILE__, __LINE__, current, current->ListEntry.Flink,
                   current->ListEntry.Flink->Blink);
          KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
        }

      current_entry = current_entry->Flink;
    }
}

static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 */
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;
  unsigned int blocks_seen = 0;

  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      unsigned int base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      base_addr = (int)current;

      if (current->Magic != BLOCK_HDR_USED_MAGIC)
        {
          DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                   current);
          KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
        }
      if (base_addr < (kernel_pool_base) ||
          (base_addr + current->Size) >
          (kernel_pool_base) + NONPAGED_POOL_SIZE)
        {
          DbgPrint("Block %x found outside pool area\n", current);
          KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
        }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
        {
          DbgPrint("Too many blocks on used list\n");
          KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
        }
      if (current->ListEntry.Flink != &UsedBlockListHead &&
          current->ListEntry.Flink->Blink != &current->ListEntry)
        {
          DbgPrint("Break in list (current %x next %x)\n",
                   current, current->ListEntry.Flink);
          KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
        }

      current_entry = current_entry->Flink;
    }
}

static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if duplicates are found
 */
{
  unsigned int base = (int)blk;
  unsigned int last = ((int)blk) + sizeof(BLOCK_HDR) + blk->Size;
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
        {
          DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                   current);
          KeBugCheck(0); //KBUG_POOL_FREE_LIST_CORRUPT);
        }

      if ((int)current > base && (int)current < last)
        {
          DbgPrint("intersecting blocks on list\n");
          for (;;);
        }
      if ((int)current < base &&
          ((int)current + current->Size + sizeof(BLOCK_HDR)) > base)
        {
          DbgPrint("intersecting blocks on list\n");
          for (;;);
        }

      current_entry = current_entry->Flink;
    }

  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if ((int)current > base && (int)current < last)
        {
          DbgPrint("intersecting blocks on list\n");
          for (;;);
        }
      if ((int)current < base &&
          ((int)current + current->Size + sizeof(BLOCK_HDR)) > base)
        {
          DbgPrint("intersecting blocks on list\n");
          for (;;);
        }

      current_entry = current_entry->Flink;
    }
}

static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 */
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;

  validate_free_list();
  validate_used_list();

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      check_duplicates(current);
      current_entry = current_entry->Flink;
    }
  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      check_duplicates(current);
      current_entry = current_entry->Flink;
    }
}
#endif

#if 0
STATIC VOID
free_pages(BLOCK_HDR* blk)
{
  ULONG start;
  ULONG end;
  ULONG i;

  start = (ULONG)blk;
  end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

  /*
   * If the block doesn't contain a whole page then there is nothing to do
   */
  if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
    {
      return;
    }
}
#endif

STATIC VOID
merge_free_block(BLOCK_HDR* blk)
{
  PLIST_ENTRY next_entry;
  BLOCK_HDR* next;
  PLIST_ENTRY previous_entry;
  BLOCK_HDR* previous;

  next_entry = blk->ListEntry.Flink;
  if (next_entry != &FreeBlockListHead)
    {
      next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
      if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
          (unsigned int)next)
        {
          RemoveEntryList(&next->ListEntry);
          blk->Size = blk->Size + sizeof(BLOCK_HDR) + next->Size;
          EiNrFreeBlocks--;
        }
    }

  previous_entry = blk->ListEntry.Blink;
  if (previous_entry != &FreeBlockListHead)
    {
      previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
      if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
          (unsigned int)blk)
        {
          RemoveEntryList(&blk->ListEntry);
          previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
          EiNrFreeBlocks--;
        }
    }
}
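
/*
 * Coalescing note: merge_free_block() only looks at the list neighbours of
 * 'blk'.  That is sufficient because add_to_free_list() below keeps the
 * free list sorted by address, so a block that is adjacent in memory is
 * also adjacent in the list.
 */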

STATIC VOID
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: Add the block to the free list, keeping the list sorted by
 * address (internal)
 */
{
  PLIST_ENTRY current_entry;
  BLOCK_HDR* current;

  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if ((unsigned int)current > (unsigned int)blk)
        {
          blk->ListEntry.Flink = current_entry;
          blk->ListEntry.Blink = current_entry->Blink;
          current_entry->Blink->Flink = &blk->ListEntry;
          current_entry->Blink = &blk->ListEntry;
          EiNrFreeBlocks++;
          return;
        }

      current_entry = current_entry->Flink;
    }
  InsertTailList(&FreeBlockListHead, &blk->ListEntry);
  EiNrFreeBlocks++;
}

static void add_to_used_list(BLOCK_HDR* blk)
/*
 * FUNCTION: Add the block to the used list (internal)
 */
{
  InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
  EiNrUsedBlocks++;
}


static void remove_from_free_list(BLOCK_HDR* current)
{
  RemoveEntryList(&current->ListEntry);
  EiNrFreeBlocks--;
}


static void remove_from_used_list(BLOCK_HDR* current)
{
  RemoveEntryList(&current->ListEntry);
  EiNrUsedBlocks--;
}


inline static void* block_to_address(BLOCK_HDR* blk)
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
  return ((void *)((int)blk + sizeof(BLOCK_HDR)));
}

inline static BLOCK_HDR* address_to_block(void* addr)
{
  return (BLOCK_HDR *)(((int)addr) - sizeof(BLOCK_HDR));
}

static BLOCK_HDR* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Grow the executive heap to accommodate a block of at least
 * 'size' bytes
 */
{
  unsigned int total_size = size + sizeof(BLOCK_HDR);
  unsigned int nr_pages = PAGE_ROUND_UP(total_size) / PAGE_SIZE;
  unsigned int start;
  BLOCK_HDR* used_blk = NULL;
  BLOCK_HDR* free_blk = NULL;
  int i;
  NTSTATUS Status;
  KIRQL oldIrql;

  start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);

  DPRINT("growing heap for block size %d, ", size);
  DPRINT("start %x\n", start);

  for (i = 0; i < nr_pages; i++)
    {
      PHYSICAL_ADDRESS Page;
      /* FIXME: Check whether we can really wait here. */
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
      if (!NT_SUCCESS(Status))
        {
          KeBugCheck(0);
          return(NULL);
        }
      Status = MmCreateVirtualMapping(NULL,
                                      (PVOID)(start + (i * PAGE_SIZE)),
                                      PAGE_READWRITE,
                                      Page,
                                      FALSE);
      if (!NT_SUCCESS(Status))
        {
          DbgPrint("Unable to create virtual mapping\n");
          KeBugCheck(0);
        }
    }

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
  if ((PAGE_SIZE - (total_size % PAGE_SIZE)) > (2 * sizeof(BLOCK_HDR)))
    {
      used_blk = (struct _BLOCK_HDR *)start;
      DPRINT("Creating block at %x\n", start);
      used_blk->Magic = BLOCK_HDR_USED_MAGIC;
      used_blk->Size = size;
      add_to_used_list(used_blk);

      free_blk = (BLOCK_HDR *)(start + sizeof(BLOCK_HDR) + size);
      DPRINT("Creating block at %x\n", free_blk);
      free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
      free_blk->Size = (nr_pages * PAGE_SIZE) - ((sizeof(BLOCK_HDR) * 2) + size);
      add_to_free_list(free_blk);

      EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;
      EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;
    }
  else
    {
      used_blk = (struct _BLOCK_HDR *)start;
      used_blk->Magic = BLOCK_HDR_USED_MAGIC;
      used_blk->Size = (nr_pages * PAGE_SIZE) - sizeof(BLOCK_HDR);
      add_to_used_list(used_blk);

      EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;
    }

  used_blk->Tag = Tag;
  used_blk->Caller = Caller;
  used_blk->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
  MiAddToTagHashTable(used_blk);
#endif /* TAG_STATISTICS_TRACKING */

  VALIDATE_POOL;
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
  return(used_blk);
}
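
/*
 * Sizing example (assuming 4096-byte pages): a 5000-byte request needs
 * 5000 + sizeof(BLOCK_HDR) bytes and therefore rounds up to two pages.
 * The unused tail of the second page is well over 2 * sizeof(BLOCK_HDR)
 * bytes, so grow_kernel_pool() above carves it into a separate free block
 * rather than wasting it.
 */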

static void* take_block(BLOCK_HDR* current, unsigned int size,
                        ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocate a used block of at least 'size' bytes from the
 * specified free block
 * RETURNS: The address of the created memory block
 */
{
  /*
   * If the block is much bigger than required then split it and
   * return a pointer to the allocated section. If the difference
   * between the sizes is marginal it makes no sense to have the
   * extra overhead.
   */
  if (current->Size > (1 + size + sizeof(BLOCK_HDR)))
    {
      BLOCK_HDR* free_blk;

      EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;

      /*
       * Replace the bigger block with a smaller block in the
       * same position in the list
       */
      free_blk = (BLOCK_HDR *)(((int)current) + sizeof(BLOCK_HDR) + size);
      free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
      InsertHeadList(&current->ListEntry, &free_blk->ListEntry);
      free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);

      current->Size = size;
      RemoveEntryList(&current->ListEntry);
      InsertHeadList(&UsedBlockListHead, &current->ListEntry);
      EiNrUsedBlocks++;
      current->Magic = BLOCK_HDR_USED_MAGIC;
      current->Tag = Tag;
      current->Caller = Caller;
      current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
      MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

      EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;
      EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;

      VALIDATE_POOL;
      return(block_to_address(current));
    }

  /*
   * Otherwise allocate the whole block
   */
  remove_from_free_list(current);
  add_to_used_list(current);

  EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;
  EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;

  current->Magic = BLOCK_HDR_USED_MAGIC;
  current->Tag = Tag;
  current->Caller = Caller;
  current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
  MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

  VALIDATE_POOL;
  return(block_to_address(current));
}
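
/*
 * Split sketch for take_block() above, when the free block is comfortably
 * larger than the request:
 *
 *   before:  [HDR(free) |             free space                 ]
 *   after:   [HDR(used) | size bytes ][HDR(free) |   remainder   ]
 *
 * If the leftover would not even cover a header plus one byte, the whole
 * block is handed out instead, since splitting would only add overhead.
 */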

#endif /* not WHOLE_PAGE_ALLOCATIONS */

VOID STDCALL ExFreeNonPagedPool (PVOID block)
/*
 * FUNCTION: Releases previously allocated memory
 * ARGUMENTS:
 *        block = block to free
 */
{
#ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
  KIRQL oldIrql;

  if (block == NULL)
    {
      return;
    }

  DPRINT("freeing block %x\n", block);

  POOL_TRACE("ExFreePool(block %x), caller %x\n", block,
             ((PULONG)&block)[-1]);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  ExFreeWholePageBlock(block);
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);

#else /* not WHOLE_PAGE_ALLOCATIONS */

  BLOCK_HDR* blk = address_to_block(block);
  KIRQL oldIrql;

  if (block == NULL)
    {
      return;
    }

  DPRINT("freeing block %x\n", blk);

  POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n", block, blk->Size,
             ((PULONG)&block)[-1]);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  VALIDATE_POOL;

  if (blk->Magic != BLOCK_HDR_USED_MAGIC)
    {
      if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
        {
          DbgPrint("ExFreePool of already freed address %x\n", block);
        }
      else
        {
          DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
                   block, blk->Magic);
        }
      KeBugCheck(0);
      return;
    }

  /* Fill the freed block to make stale references easier to catch */
  memset(block, 0xcc, blk->Size);

#ifdef TAG_STATISTICS_TRACKING
  MiRemoveFromTagHashTable(blk);
#endif /* TAG_STATISTICS_TRACKING */
  remove_from_used_list(blk);
  blk->Magic = BLOCK_HDR_FREE_MAGIC;
  add_to_free_list(blk);
  merge_free_block(blk);

  EiUsedNonPagedPool = EiUsedNonPagedPool - blk->Size;
  EiFreeNonPagedPool = EiFreeNonPagedPool + blk->Size;
  VALIDATE_POOL;
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);

#endif /* WHOLE_PAGE_ALLOCATIONS */
}

PVOID STDCALL
ExAllocateNonPagedPoolWithTag(ULONG Type,
                              ULONG Size,
                              ULONG Tag,
                              PVOID Caller)
{
#ifdef WHOLE_PAGE_ALLOCATIONS
  PVOID block;
  KIRQL oldIrql;

  POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
             Size, Caller);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  /*
   * Accommodate this useful idiom
   */
  if (Size == 0)
    {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
    }

  block = ExAllocateWholePageBlock(Size);
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
  return(block);

#else /* not WHOLE_PAGE_ALLOCATIONS */
  BLOCK_HDR* current = NULL;
  PLIST_ENTRY current_entry;
  PVOID block;
  BLOCK_HDR* best = NULL;
  KIRQL oldIrql;

  POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
             Size, Caller);

  KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

  VALIDATE_POOL;

  /*
   * Accommodate this useful idiom
   */
  if (Size == 0)
    {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
    }

  /*
   * Look for the smallest already created block of sufficient size
   * (best fit)
   */
  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      DPRINT("current %x size %x tag_next %x\n", current, current->Size,
             current->tag_next);
      if (current->Size >= Size &&
          (best == NULL || current->Size < best->Size))
        {
          best = current;
        }
      current_entry = current_entry->Flink;
    }
  if (best != NULL)
    {
      block = take_block(best, Size, Tag, Caller);
      VALIDATE_POOL;
      memset(block, 0, Size);
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);

      return(block);
    }


  /*
   * Otherwise create a new block
   */
  KeReleaseSpinLock(&MmNpoolLock, oldIrql);
  block = block_to_address(grow_kernel_pool(Size, Tag, Caller));

  memset(block, 0, Size);

  return(block);
#endif /* WHOLE_PAGE_ALLOCATIONS */
}
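
#if 0
/*
 * Usage sketch, kept disabled like the other #if 0 blocks in this file.
 * It shows how a kernel-mode caller might obtain and release a tagged
 * block; the tag value and the function itself are hypothetical, and the
 * Type argument is ignored by this implementation.
 */
static VOID
ExampleTaggedAllocation(VOID)
{
  PVOID Buffer;

  /* 0x6C706D58 is 'Xmpl' read as a little-endian ULONG */
  Buffer = ExAllocateNonPagedPoolWithTag(0, 128, 0x6C706D58,
                                         ExampleTaggedAllocation);
  if (Buffer != NULL)
    {
      /* The allocator zeroes the block before returning it */
      ExFreeNonPagedPool(Buffer);
    }
}
#endif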

#ifdef WHOLE_PAGE_ALLOCATIONS

PVOID STDCALL
ExAllocateWholePageBlock(ULONG UserSize)
{
  PVOID Address;
  PHYSICAL_ADDRESS Page;
  ULONG i;
  ULONG Size;
  ULONG NrPages;

  Size = sizeof(ULONG) + UserSize;
  NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;

  Address = MiAllocNonPagedPoolRegion(NrPages + 1);

  for (i = 0; i < NrPages; i++)
    {
      Page = MmAllocPage(MC_NPPOOL, 0);
      if (Page.QuadPart == 0LL)
        {
          KeBugCheck(0);
        }
      MmCreateVirtualMapping(NULL,
                             Address + (i * PAGE_SIZE),
                             PAGE_READWRITE | PAGE_SYSTEM,
                             Page,
                             TRUE);
    }

  *((PULONG)((ULONG)Address + (NrPages * PAGE_SIZE) - Size)) = NrPages;
  return((PVOID)((ULONG)Address + (NrPages * PAGE_SIZE) - UserSize));
}
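
/*
 * Layout sketch for ExAllocateWholePageBlock() above: the block is pushed
 * to the end of the mapped range so that an overrun faults in the extra
 * unmapped page reserved after it, and the page count is stashed in the
 * ULONG just before the user area:
 *
 *   [ mapped pages ... | NrPages | user data (UserSize) ][ unmapped page ]
 *                                ^ pointer returned to the caller
 */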

VOID STDCALL
ExFreeWholePageBlock(PVOID Addr)
{
  ULONG NrPages;

  if ((ULONG)Addr < kernel_pool_base ||
      (ULONG)Addr >= (kernel_pool_base + NONPAGED_POOL_SIZE))
    {
      DbgPrint("Block %x found outside pool area\n", Addr);
      KeBugCheck(0);
    }
  NrPages = *(PULONG)((ULONG)Addr - sizeof(ULONG));
  MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG)Addr), NrPages, TRUE);
}

#endif /* WHOLE_PAGE_ALLOCATIONS */

/* EOF */