/* $Id: npool.c,v 1.53 2001/12/31 19:06:47 dwelch Exp $
 *
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/npool.c
 * PURPOSE:     Implements the kernel memory pool
 * PROGRAMMER:  David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *              27/05/98: Created
 *              10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
 *                        in take_block (if current bigger than required)
 *                        in remove_from_used_list
 *                        in ExFreePool
 *              23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
 */

/* INCLUDES ****************************************************************/

#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/ntoskrnl.h>
#include <internal/pool.h>

#define NDEBUG
#include <internal/debug.h>

/* Enable strict checking of the nonpaged pool on every allocation */
//#define ENABLE_VALIDATE_POOL

/* Enable tracking of statistics about the tagged blocks in the pool */
#define TAG_STATISTICS_TRACKING

/*
 * Put each block in its own range of pages and position the block at the
 * end of the range, so that any access beyond the end of the block hits
 * an invalid memory location.
 */
//#define WHOLE_PAGE_ALLOCATIONS

#ifdef ENABLE_VALIDATE_POOL
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
/* No trailing semicolon, so the macro expands to a single statement. */
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0)
#else
#define POOL_TRACE(args...)
#endif

/* TYPES *******************************************************************/

#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)

/*
 * Fields present at the start of a block (this is for internal use only)
 */
typedef struct _BLOCK_HDR
{
   ULONG Magic;                 /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
   ULONG Size;                  /* Size of the data area, excluding this header */
   LIST_ENTRY ListEntry;        /* Entry in the free or the used block list */
   ULONG Tag;                   /* Tag supplied by the allocator's caller */
   PVOID Caller;                /* Address of the allocator, for debugging */
   struct _BLOCK_HDR* tag_next; /* Next block in the same tag hash chain */
   BOOLEAN Dumped;              /* Already reported by the statistics dump */
} BLOCK_HDR;
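
/*
 * Memory layout implied by the header (a sketch; fields abbreviated):
 *
 *   |<- sizeof(BLOCK_HDR) ->|<------- Size bytes ------->|
 *   +-----------------------+----------------------------+
 *   | Magic, Size, ...      | caller-visible data        |
 *   +-----------------------+----------------------------+
 *   ^ BLOCK_HDR*            ^ address returned to callers
 *
 * block_to_address() and address_to_block() below convert between the
 * two addresses by adding or subtracting sizeof(BLOCK_HDR).
 */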

PVOID STDCALL
ExAllocateWholePageBlock(ULONG Size);
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr);

/* GLOBALS *****************************************************************/

/*
 * Base of the nonpaged pool, set up during memory management initialization
 */
static unsigned int kernel_pool_base = 0;

/*
 * Head of the list of free blocks
 */
static LIST_ENTRY FreeBlockListHead;

/*
 * Head of the list of in-use blocks
 */
static LIST_ENTRY UsedBlockListHead;

#ifndef WHOLE_PAGE_ALLOCATIONS
/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;
#endif

/*
 * Lock that protects the non-paged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/*
 * Allocate a range of memory in the nonpaged pool
 */
PVOID
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

VOID
MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */
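
/*
 * The hash bucket for a tag is simply (Tag % TAG_HASH_TABLE_SIZE), and
 * blocks sharing a tag are chained through their tag_next fields.  As a
 * worked example (illustrative only): the tag 'Npol' is stored
 * little-endian as 0x6c6f704e, and since TAG_HASH_TABLE_SIZE is 1024 the
 * modulo keeps the low ten bits, giving bucket 0x4e (78).
 */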

/* FUNCTIONS ***************************************************************/

#ifdef TAG_STATISTICS_TRACKING
VOID
MiRemoveFromTagHashTable(BLOCK_HDR* block)
/*
 * Remove a block from the tag hash table
 */
{
   BLOCK_HDR* previous;
   BLOCK_HDR* current;
   ULONG hash;

   if (block->Tag == 0)
     {
        return;
     }

   hash = block->Tag % TAG_HASH_TABLE_SIZE;

   previous = NULL;
   current = tag_hash_table[hash];
   while (current != NULL)
     {
        if (current == block)
          {
             if (previous == NULL)
               {
                  tag_hash_table[hash] = block->tag_next;
               }
             else
               {
                  previous->tag_next = block->tag_next;
               }
             return;
          }
        previous = current;
        current = current->tag_next;
     }
   DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
           block->Tag, block->Caller);
   KeBugCheck(0);
}

VOID
MiAddToTagHashTable(BLOCK_HDR* block)
/*
 * Add a block to the tag hash table, keeping blocks that share a tag
 * adjacent in the chain so the statistics dump can aggregate them
 */
{
   ULONG hash;
   BLOCK_HDR* current;
   BLOCK_HDR* previous;

   if (block->Tag == 0)
     {
        return;
     }

   hash = block->Tag % TAG_HASH_TABLE_SIZE;

   previous = NULL;
   current = tag_hash_table[hash];
   while (current != NULL)
     {
        if (current->Tag == block->Tag)
          {
             /* Insert directly after the first block with the same tag. */
             block->tag_next = current->tag_next;
             current->tag_next = block;
             return;
          }
        previous = current;
        /* Leftover debugging check against a hardcoded kernel address. */
        if ((PVOID)current->tag_next >= (PVOID)0xc1123160)
          {
             DbgPrint("previous %x\n", previous);
          }
        current = current->tag_next;
     }
   block->tag_next = NULL;
   if (previous == NULL)
     {
        tag_hash_table[hash] = block;
     }
   else
     {
        previous->tag_next = block;
     }
}
#endif /* TAG_STATISTICS_TRACKING */

VOID
ExInitNonPagedPool(ULONG BaseAddress)
{
   kernel_pool_base = BaseAddress;
   KeInitializeSpinLock(&MmNpoolLock);
   MmInitKernelMap((PVOID)BaseAddress);
   memset(tag_hash_table, 0, sizeof(tag_hash_table));
   InitializeListHead(&FreeBlockListHead);
   InitializeListHead(&UsedBlockListHead);
}

#ifdef TAG_STATISTICS_TRACKING
VOID STATIC
MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
{
   CHAR c1, c2, c3, c4;

   c1 = (CurrentTag >> 24) & 0xFF;
   c2 = (CurrentTag >> 16) & 0xFF;
   c3 = (CurrentTag >> 8) & 0xFF;
   c4 = CurrentTag & 0xFF;

   if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
     {
        DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
                 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
                 CurrentSize, CurrentSize / CurrentNrBlocks);
     }
   else
     {
        DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
                 CurrentTag, CurrentNrBlocks, CurrentSize,
                 CurrentSize / CurrentNrBlocks);
     }
}
#endif /* TAG_STATISTICS_TRACKING */

VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#ifdef TAG_STATISTICS_TRACKING
   ULONG i;
   BLOCK_HDR* current;
   ULONG CurrentTag;
   ULONG CurrentNrBlocks;
   ULONG CurrentSize;
   ULONG TotalBlocks;
   ULONG TotalSize;

   DbgPrint("******* Dumping nonpaged pool stats ******\n");
   TotalBlocks = 0;
   TotalSize = 0;
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
     {
        CurrentTag = 0;
        CurrentNrBlocks = 0;
        CurrentSize = 0;
        current = tag_hash_table[i];
        while (current != NULL)
          {
             if (current->Tag != CurrentTag)
               {
                  if (CurrentTag != 0 && CurrentNrBlocks != 0)
                    {
                       MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
                    }
                  CurrentTag = current->Tag;
                  CurrentNrBlocks = 0;
                  CurrentSize = 0;
               }

             if (!NewOnly || !current->Dumped)
               {
                  CurrentNrBlocks++;
                  TotalBlocks++;
                  CurrentSize = CurrentSize + current->Size;
                  TotalSize = TotalSize + current->Size;
                  current->Dumped = TRUE;
               }
             current = current->tag_next;
          }
        if (CurrentTag != 0 && CurrentNrBlocks != 0)
          {
             MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
          }
     }
   if (TotalBlocks != 0)
     {
        DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
                 TotalBlocks, TotalSize, TotalSize / TotalBlocks);
     }
   else
     {
        DbgPrint("TotalBlocks %d TotalSize %d\n",
                 TotalBlocks, TotalSize);
     }
   DbgPrint("***************** Dump Complete ***************\n");
#endif /* TAG_STATISTICS_TRACKING */
}

VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   KIRQL oldIrql;

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   DbgPrint("******* Dumping nonpaged pool contents ******\n");
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        if (!NewOnly || !current->Dumped)
          {
             CHAR c1, c2, c3, c4;

             c1 = (current->Tag >> 24) & 0xFF;
             c2 = (current->Tag >> 16) & 0xFF;
             c3 = (current->Tag >> 8) & 0xFF;
             c4 = current->Tag & 0xFF;

             if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
               {
                  DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                           current->Size, current->Tag, c4, c3, c2, c1,
                           current->Caller);
               }
             else
               {
                  DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                           current->Size, current->Tag, current->Caller);
               }
             current->Dumped = TRUE;
          }
        current_entry = current_entry->Flink;
     }
   DbgPrint("***************** Dump Complete ***************\n");
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}

#ifndef WHOLE_PAGE_ALLOCATIONS

#ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen = 0;

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        unsigned int base_addr;

        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        base_addr = (int)current;

        if (current->Magic != BLOCK_HDR_FREE_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }

        if (base_addr < (kernel_pool_base) ||
            (base_addr + current->Size) > (kernel_pool_base) + NONPAGED_POOL_SIZE)
          {
             DbgPrint("Block %x found outside pool area\n", current);
             DbgPrint("Size %d\n", current->Size);
             DbgPrint("Limits are %x %x\n", kernel_pool_base,
                      kernel_pool_base + NONPAGED_POOL_SIZE);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        blocks_seen++;
        if (blocks_seen > EiNrFreeBlocks)
          {
             DbgPrint("Too many blocks on free list\n");
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        if (current->ListEntry.Flink != &FreeBlockListHead &&
            current->ListEntry.Flink->Blink != &current->ListEntry)
          {
             DbgPrint("%s:%d:Break in list (current %x next %x "
                      "current->next->previous %x)\n",
                      __FILE__, __LINE__, current, current->ListEntry.Flink,
                      current->ListEntry.Flink->Blink);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }

        current_entry = current_entry->Flink;
     }
}

static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen = 0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
        unsigned int base_addr;

        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        base_addr = (int)current;

        if (current->Magic != BLOCK_HDR_USED_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        if (base_addr < (kernel_pool_base) ||
            (base_addr + current->Size) >
            (kernel_pool_base) + NONPAGED_POOL_SIZE)
          {
             DbgPrint("Block %x found outside pool area\n", current);
             for(;;);
          }
        blocks_seen++;
        if (blocks_seen > EiNrUsedBlocks)
          {
             DbgPrint("Too many blocks on used list\n");
             for(;;);
          }
        if (current->ListEntry.Flink != &UsedBlockListHead &&
            current->ListEntry.Flink->Blink != &current->ListEntry)
          {
             DbgPrint("Break in list (current %x next %x)\n",
                      current, current->ListEntry.Flink);
             for(;;);
          }

        current_entry = current_entry->Flink;
     }
}

static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *        blk = block to check
 * NOTE: Bug checks if duplicates are found
 */
{
   unsigned int base = (int)blk;
   unsigned int last = ((int)blk) + sizeof(BLOCK_HDR) + blk->Size;
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

        if (current->Magic != BLOCK_HDR_FREE_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }

        if ((int)current > base && (int)current < last)
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }
        if ((int)current < base &&
            ((int)current + current->Size + sizeof(BLOCK_HDR)) > base)
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }

        current_entry = current_entry->Flink;
     }

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

        if ((int)current > base && (int)current < last)
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }
        if ((int)current < base &&
            ((int)current + current->Size + sizeof(BLOCK_HDR)) > base)
          {
             DbgPrint("intersecting blocks on list\n");
             for(;;);
          }

        current_entry = current_entry->Flink;
     }
}

static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;

   validate_free_list();
   validate_used_list();

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        check_duplicates(current);
        current_entry = current_entry->Flink;
     }
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        check_duplicates(current);
        current_entry = current_entry->Flink;
     }
}
#endif

#if 0
STATIC VOID
free_pages(BLOCK_HDR* blk)
{
   ULONG start;
   ULONG end;
   ULONG i;

   start = (ULONG)blk;
   end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

   /*
    * If the block doesn't contain a whole page then there is nothing to do
    */
   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
     {
        return;
     }
}
#endif

STATIC VOID
merge_free_block(BLOCK_HDR* blk)
/*
 * FUNCTION: Merge a freed block with any physically adjacent free
 * neighbours (internal)
 */
{
   PLIST_ENTRY next_entry;
   BLOCK_HDR* next;
   PLIST_ENTRY previous_entry;
   BLOCK_HDR* previous;

   next_entry = blk->ListEntry.Flink;
   if (next_entry != &FreeBlockListHead)
     {
        next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
        if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
            (unsigned int)next)
          {
             RemoveEntryList(&next->ListEntry);
             blk->Size = blk->Size + sizeof(BLOCK_HDR) + next->Size;
             EiNrFreeBlocks--;
          }
     }

   previous_entry = blk->ListEntry.Blink;
   if (previous_entry != &FreeBlockListHead)
     {
        previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
        if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
            (unsigned int)blk)
          {
             RemoveEntryList(&blk->ListEntry);
             previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
             EiNrFreeBlocks--;
          }
     }
}
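
/*
 * Coalescing example (a sketch with illustrative addresses): if a free
 * block header at 0x1000 with Size 0x20 is immediately followed by a
 * second free block header at 0x1000 + sizeof(BLOCK_HDR) + 0x20, the two
 * become one free block whose Size grows by sizeof(BLOCK_HDR) plus the
 * neighbour's Size, and EiNrFreeBlocks drops by one.  This neighbour test
 * is only valid because add_to_free_list() keeps the list sorted by
 * address.
 */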

STATIC VOID
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the free list, keeping the list sorted by
 * address so adjacent free blocks can be merged (internal)
 */
{
   PLIST_ENTRY current_entry;
   BLOCK_HDR* current;

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

        if ((unsigned int)current > (unsigned int)blk)
          {
             /* Link blk in front of the first block at a higher address. */
             blk->ListEntry.Flink = current_entry;
             blk->ListEntry.Blink = current_entry->Blink;
             current_entry->Blink->Flink = &blk->ListEntry;
             current_entry->Blink = &blk->ListEntry;
             EiNrFreeBlocks++;
             return;
          }

        current_entry = current_entry->Flink;
     }
   InsertTailList(&FreeBlockListHead, &blk->ListEntry);
   EiNrFreeBlocks++;
}

static void add_to_used_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the used list (internal)
 */
{
   InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
   EiNrUsedBlocks++;
}


static void remove_from_free_list(BLOCK_HDR* current)
{
   RemoveEntryList(&current->ListEntry);
   EiNrFreeBlocks--;
}


static void remove_from_used_list(BLOCK_HDR* current)
{
   RemoveEntryList(&current->ListEntry);
   EiNrUsedBlocks--;
}


inline static void* block_to_address(BLOCK_HDR* blk)
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
   return ( (void *) ((int)blk + sizeof(BLOCK_HDR)) );
}

inline static BLOCK_HDR* address_to_block(void* addr)
{
   return (BLOCK_HDR *)
          ( ((int)addr) - sizeof(BLOCK_HDR) );
}

static BLOCK_HDR* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Grow the executive heap to accommodate a block of at least
 * 'size' bytes
 */
{
   unsigned int total_size = size + sizeof(BLOCK_HDR);
   unsigned int nr_pages = PAGE_ROUND_UP(total_size) / PAGESIZE;
   unsigned int start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);
   BLOCK_HDR* used_blk = NULL;
   BLOCK_HDR* free_blk = NULL;
   unsigned int i;
   NTSTATUS Status;

   DPRINT("growing heap for block size %d, ", size);
   DPRINT("start %x\n", start);

   for (i = 0; i < nr_pages; i++)
     {
        PVOID Page;
        Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
        if (!NT_SUCCESS(Status))
          {
             return(NULL);
          }
        Status = MmCreateVirtualMapping(NULL,
                                        (PVOID)(start + (i * PAGESIZE)),
                                        PAGE_READWRITE,
                                        (ULONG)Page);
        if (!NT_SUCCESS(Status))
          {
             DbgPrint("Unable to create virtual mapping\n");
             KeBugCheck(0);
          }
     }

   /*
    * If enough of the last page is left over, split the new region into a
    * used block and a trailing free block; otherwise give the whole region
    * to the used block.  The remainder check guards the corner case where
    * total_size is an exact multiple of PAGESIZE and there is no slack.
    */
   if ((total_size % PAGESIZE) != 0 &&
       (PAGESIZE - (total_size % PAGESIZE)) > (2 * sizeof(BLOCK_HDR)))
     {
        used_blk = (struct _BLOCK_HDR *)start;
        DPRINT("Creating block at %x\n", start);
        used_blk->Magic = BLOCK_HDR_USED_MAGIC;
        used_blk->Size = size;
        add_to_used_list(used_blk);

        free_blk = (BLOCK_HDR *)(start + sizeof(BLOCK_HDR) + size);
        DPRINT("Creating block at %x\n", free_blk);
        free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
        free_blk->Size = (nr_pages * PAGESIZE) - ((sizeof(BLOCK_HDR) * 2) + size);
        add_to_free_list(free_blk);

        EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;
        EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;
     }
   else
     {
        used_blk = (struct _BLOCK_HDR *)start;
        used_blk->Magic = BLOCK_HDR_USED_MAGIC;
        used_blk->Size = (nr_pages * PAGESIZE) - sizeof(BLOCK_HDR);
        add_to_used_list(used_blk);

        EiUsedNonPagedPool = EiUsedNonPagedPool + used_blk->Size;
     }

   used_blk->Tag = Tag;
   used_blk->Caller = Caller;
   used_blk->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(used_blk);
#endif /* TAG_STATISTICS_TRACKING */

   VALIDATE_POOL;
   return(used_blk);
}
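
/*
 * Worked example for the split in grow_kernel_pool() (illustrative
 * figures, assuming PAGESIZE is 4096 and sizeof(BLOCK_HDR) is 32): a
 * request for 1000 bytes needs 1000 + 32 = 1032 bytes, i.e. one page.
 * The slack of 4096 - 1032 = 3064 bytes exceeds 2*sizeof(BLOCK_HDR), so
 * the page is split into a used block of Size 1000 and a trailing free
 * block of Size 4096 - (2*32 + 1000) = 3032.
 */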

static void* take_block(BLOCK_HDR* current, unsigned int size,
                        ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocate a used block of at least 'size' bytes from the
 * specified free block
 * RETURNS: The address of the created memory block
 */
{
   /*
    * If the block is much bigger than required then split it and
    * return a pointer to the allocated section. If the difference
    * between the sizes is marginal it makes no sense to have the
    * extra overhead.
    */
   if (current->Size > (1 + size + sizeof(BLOCK_HDR)))
     {
        BLOCK_HDR* free_blk;

        EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;

        /*
         * Replace the bigger block with a smaller block in the
         * same position in the list
         */
        free_blk = (BLOCK_HDR *)(((int)current)
                                 + sizeof(BLOCK_HDR) + size);
        free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
        InsertHeadList(&current->ListEntry, &free_blk->ListEntry);
        free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);

        current->Size = size;
        RemoveEntryList(&current->ListEntry);
        InsertHeadList(&UsedBlockListHead, &current->ListEntry);
        EiNrUsedBlocks++;
        current->Magic = BLOCK_HDR_USED_MAGIC;
        current->Tag = Tag;
        current->Caller = Caller;
        current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
        MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

        EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;
        EiFreeNonPagedPool = EiFreeNonPagedPool + free_blk->Size;

        VALIDATE_POOL;
        return(block_to_address(current));
     }

   /*
    * Otherwise allocate the whole block
    */
   remove_from_free_list(current);
   add_to_used_list(current);

   EiFreeNonPagedPool = EiFreeNonPagedPool - current->Size;
   EiUsedNonPagedPool = EiUsedNonPagedPool + current->Size;

   current->Magic = BLOCK_HDR_USED_MAGIC;
   current->Tag = Tag;
   current->Caller = Caller;
   current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

   VALIDATE_POOL;
   return(block_to_address(current));
}
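
/*
 * Split example for take_block() (illustrative figures, again assuming
 * sizeof(BLOCK_HDR) is 32): taking 64 bytes from a free block of Size 256
 * leaves 256 - (32 + 64) = 160 bytes for the replacement free block,
 * which occupies the original block's position in the free list.
 */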

#endif /* not WHOLE_PAGE_ALLOCATIONS */

VOID STDCALL ExFreeNonPagedPool (PVOID block)
/*
 * FUNCTION: Releases previously allocated memory
 * ARGUMENTS:
 *        block = block to free
 */
{
#ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
   KIRQL oldIrql;

   if (block == NULL)
     {
        return;
     }

   DPRINT("freeing block %x\n", block);

   POOL_TRACE("ExFreePool(block %x), caller %x\n", block,
              ((PULONG)&block)[-1]);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   ExFreeWholePageBlock(block);
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);

#else /* not WHOLE_PAGE_ALLOCATIONS */

   BLOCK_HDR* blk = address_to_block(block);
   KIRQL oldIrql;

   if (block == NULL)
     {
        return;
     }

   DPRINT("freeing block %x\n", blk);

   POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n", block, blk->Size,
              ((PULONG)&block)[-1]);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   VALIDATE_POOL;

   if (blk->Magic != BLOCK_HDR_USED_MAGIC)
     {
        if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
          {
             DbgPrint("ExFreePool of already freed address %x\n", block);
          }
        else
          {
             DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
                      block, blk->Magic);
          }
        KeBugCheck(0);
        return;
     }

   /* Fill the freed data area to catch use-after-free errors. */
   memset(block, 0xcc, blk->Size);

#ifdef TAG_STATISTICS_TRACKING
   MiRemoveFromTagHashTable(blk);
#endif /* TAG_STATISTICS_TRACKING */
   remove_from_used_list(blk);

   /* Update the accounting before merging, since a merge changes blk->Size. */
   EiUsedNonPagedPool = EiUsedNonPagedPool - blk->Size;
   EiFreeNonPagedPool = EiFreeNonPagedPool + blk->Size;

   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   add_to_free_list(blk);
   merge_free_block(blk);

   VALIDATE_POOL;
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);

#endif /* WHOLE_PAGE_ALLOCATIONS */
}

PVOID STDCALL
ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
{
#ifdef WHOLE_PAGE_ALLOCATIONS
   PVOID block;
   KIRQL oldIrql;

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size, Caller);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   /*
    * accommodate this useful idiom
    */
   if (Size == 0)
     {
        POOL_TRACE("= NULL\n");
        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
        return(NULL);
     }

   block = ExAllocateWholePageBlock(Size);
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   return(block);

#else /* not WHOLE_PAGE_ALLOCATIONS */
   BLOCK_HDR* current = NULL;
   PLIST_ENTRY current_entry;
   PVOID block;
   BLOCK_HDR* best = NULL;
   KIRQL oldIrql;

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size, Caller);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   VALIDATE_POOL;

   /*
    * accommodate this useful idiom
    */
   if (Size == 0)
     {
        POOL_TRACE("= NULL\n");
        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
        return(NULL);
     }

   /*
    * Look for the smallest already created block of sufficient size
    * (best fit)
    */
   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        DPRINT("current %x size %x tag_next %x\n", current, current->Size,
               current->tag_next);
        if (current->Size >= Size &&
            (best == NULL || current->Size < best->Size))
          {
             best = current;
          }
        current_entry = current_entry->Flink;
     }
   if (best != NULL)
     {
        block = take_block(best, Size, Tag, Caller);
        VALIDATE_POOL;
        memset(block, 0, Size);
        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
        return(block);
     }

   /*
    * Otherwise create a new block
    */
   best = grow_kernel_pool(Size, Tag, Caller);
   if (best == NULL)
     {
        /* Growing the pool failed, so fail the allocation. */
        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
        return(NULL);
     }
   block = block_to_address(best);
   VALIDATE_POOL;
   memset(block, 0, Size);
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   return(block);
#endif /* WHOLE_PAGE_ALLOCATIONS */
}
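
/*
 * Example usage of the two entry points above (a sketch only, excluded
 * from the build; the size, the tag value and the Type argument of 0 are
 * arbitrary illustrations):
 */
#if 0
static VOID
ExampleNonPagedPoolUsage(VOID)
{
   PVOID Buffer;

   /* Allocate 128 zero-initialized bytes tagged 'Npol'. */
   Buffer = ExAllocateNonPagedPoolWithTag(0, 128, 0x6c6f704e, NULL);
   if (Buffer == NULL)
     {
        return;
     }
   /* ... use Buffer ... */
   ExFreeNonPagedPool(Buffer);
}
#endif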

#ifdef WHOLE_PAGE_ALLOCATIONS

PVOID STDCALL
ExAllocateWholePageBlock(ULONG UserSize)
{
   PVOID Address;
   PVOID Page;
   ULONG i;
   ULONG Size;
   ULONG NrPages;

   Size = sizeof(ULONG) + UserSize;
   NrPages = ROUND_UP(Size, PAGESIZE) / PAGESIZE;

   /* Reserve one extra page; it is left unmapped to act as a guard page. */
   Address = MiAllocNonPagedPoolRegion(NrPages + 1);

   for (i = 0; i < NrPages; i++)
     {
        Page = MmAllocPage(MC_NPPOOL, 0);
        if (Page == NULL)
          {
             KeBugCheck(0);
          }
        MmCreateVirtualMapping(NULL,
                               Address + (i * PAGESIZE),
                               PAGE_READWRITE | PAGE_SYSTEM,
                               (ULONG)Page);
     }

   /* Store the page count just below the returned block for the free path. */
   *((PULONG)((ULONG)Address + (NrPages * PAGESIZE) - Size)) = NrPages;
   return((PVOID)((ULONG)Address + (NrPages * PAGESIZE) - UserSize));
}
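
/*
 * Resulting layout (a sketch): NrPages mapped pages followed by one
 * reserved but unmapped guard page, with the block pushed to the very
 * end of the mapped range:
 *
 *   | mapped pages ... [NrPages][user data] | unmapped guard page |
 *                                           ^ any overrun faults here
 *
 * The ULONG page count sits immediately before the address returned to
 * the caller, which is where ExFreeWholePageBlock() looks for it.
 */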

VOID STDCALL
ExFreeWholePageBlock(PVOID Addr)
{
   ULONG NrPages;

   if ((ULONG)Addr < kernel_pool_base ||
       (ULONG)Addr >= (kernel_pool_base + NONPAGED_POOL_SIZE))
     {
        DbgPrint("Block %x found outside pool area\n", Addr);
        KeBugCheck(0);
     }
   NrPages = *(PULONG)((ULONG)Addr - sizeof(ULONG));
   MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG)Addr), NrPages, TRUE);
}

#endif /* WHOLE_PAGE_ALLOCATIONS */

/* EOF */