- Fixed whole page allocations.
[reactos.git] / reactos / ntoskrnl / mm / npool.c
1 /* $Id: npool.c,v 1.74 2003/08/19 23:52:36 dwelch Exp $
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
8 * UPDATE HISTORY:
9 * 27/05/98: Created
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
13 * in ExFreePool
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
15 */
16
17 /* INCLUDES ****************************************************************/
18
19 #include <ddk/ntddk.h>
20 #include <internal/mm.h>
21 #include <internal/ntoskrnl.h>
22 #include <internal/pool.h>
23
24 #define NDEBUG
25 #include <internal/debug.h>
26
27 /* Enable strict checking of the nonpaged pool on every allocation */
28 //#define ENABLE_VALIDATE_POOL
29
30 /* Enable tracking of statistics about the tagged blocks in the pool */
31 #define TAG_STATISTICS_TRACKING
32
33 /*
34 * Put each block in its own range of pages and position the block at the
35 * end of the range so any accesses beyond the end of block are to invalid
36 * memory locations.
37 */
38 /*#define WHOLE_PAGE_ALLOCATIONS*/
39
40 #ifdef ENABLE_VALIDATE_POOL
41 #define VALIDATE_POOL validate_kernel_pool()
42 #else
43 #define VALIDATE_POOL
44 #endif
45
46 #if 0
47 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
48 #else
49 #define POOL_TRACE(args...)
50 #endif
51
52 /* avl types ****************************************************************/
53
/* FIXME:
 * These declarations should be moved into a separate header file.
 */
57
/*
 * AVL tree node, embedded in each free BLOCK_HDR so free blocks can be
 * looked up by size without extra allocations.
 */
typedef struct _NODE
{
   struct _NODE* link[2];   /* link[0] = left child, link[1] = right child */
   struct _NODE* parent;    /* NULL for the tree root */
   signed char balance;     /* AVL balance factor: -1, 0 or +1
                               (transiently +/-2 during rebalancing) */
} NODE, *PNODE;
64
65 /* TYPES *******************************************************************/
66
67 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
68 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
69
70 /*
71 * fields present at the start of a block (this is for internal use only)
72 */
/*
 * fields present at the start of a block (this is for internal use only)
 */
typedef struct _BLOCK_HDR
{
   ULONG Magic;             /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
   ULONG Size;              /* usable size in bytes, excluding this header */
   LIST_ENTRY ListEntry;    /* entry in UsedBlockListHead (used blocks) */
   LIST_ENTRY AddressList;  /* entry in AddressListHead, kept in address
                               order so neighbours can be coalesced */
   LIST_ENTRY TagListEntry; /* entry in the per-tag statistics hash chain */
   NODE Node;               /* AVL node in the size-keyed free-block tree
                               (valid only while the block is free) */
   ULONG Tag;               /* pool tag supplied by the allocator, 0 if none */
   PVOID Caller;            /* return address of the allocating caller */
   BOOLEAN Dumped;          /* TRUE once shown by a "NewOnly" debug dump */
} BLOCK_HDR;
85
86 PVOID STDCALL
87 ExAllocateWholePageBlock(ULONG Size);
88 VOID STDCALL
89 ExFreeWholePageBlock(PVOID Addr);
90
91 /* GLOBALS *****************************************************************/
92
93 extern PVOID MiNonPagedPoolStart;
94 extern ULONG MiNonPagedPoolLength;
95 static ULONG MiCurrentNonPagedPoolLength = 0;
96
97 /*
98 * Head of the list of free blocks
99 */
100 static PNODE FreeBlockListRoot = NULL;
101
102 /*
103 * Head of the list of in use block
104 */
105 static LIST_ENTRY UsedBlockListHead;
106
107 static LIST_ENTRY AddressListHead;
108
109 #ifndef WHOLE_PAGE_ALLOCATIONS
110 /*
111 * Count of free blocks
112 */
113 static ULONG EiNrFreeBlocks = 0;
114
115 /*
116 * Count of used blocks
117 */
118 static ULONG EiNrUsedBlocks = 0;
119 #endif
120
121 /*
122 * Lock that protects the non-paged pool data structures
123 */
124 static KSPIN_LOCK MmNpoolLock;
125
126 /*
127 * Total memory used for free nonpaged pool blocks
128 */
129 ULONG EiFreeNonPagedPool = 0;
130
131 /*
132 * Total memory used for nonpaged pool blocks
133 */
134 ULONG EiUsedNonPagedPool = 0;
135
136 /*
137 * Allocate a range of memory in the nonpaged pool
138 */
139 PVOID
140 MiAllocNonPagedPoolRegion(unsigned int nr_pages);
141
142 VOID
143 MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);
144
145 #ifdef TAG_STATISTICS_TRACKING
146 #define TAG_HASH_TABLE_SIZE (1024)
147 static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
148 #endif /* TAG_STATISTICS_TRACKING */
149
150 #ifdef WHOLE_PAGE_ALLOCATIONS
151 static UCHAR NonPagedPoolAllocMapBuffer[ROUND_UP(MM_NONPAGED_POOL_SIZE / PAGE_SIZE, 32) / 8];
152 static RTL_BITMAP NonPagedPoolAllocMap;
153 static ULONG NonPagedPoolAllocMapHint;
154 #endif /* WHOLE_PAGE_ALLOCATIONS */
155
156 /* avl helper functions ****************************************************/
157
158 void DumpFreeBlockNode(PNODE p)
159 {
160 static int count = 0;
161 BLOCK_HDR* blk;
162
163 count++;
164
165 if (p)
166 {
167 DumpFreeBlockNode(p->link[0]);
168 blk = CONTAINING_RECORD(p, BLOCK_HDR, Node);
169 DbgPrint("%08x %8d (%d)\n", blk, blk->Size, count);
170 DumpFreeBlockNode(p->link[1]);
171 }
172
173 count--;
174 }
175 void DumpFreeBlockTree(void)
176 {
177 DbgPrint("--- Begin tree ------------------\n");
178 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, BLOCK_HDR, Node));
179 DumpFreeBlockNode(FreeBlockListRoot);
180 DbgPrint("--- End tree --------------------\n");
181 }
182
183 int compare_node(PNODE p1, PNODE p2)
184 {
185 BLOCK_HDR* blk1 = CONTAINING_RECORD(p1, BLOCK_HDR, Node);
186 BLOCK_HDR* blk2 = CONTAINING_RECORD(p2, BLOCK_HDR, Node);
187
188 if (blk1->Size == blk2->Size)
189 {
190 if (blk1 < blk2)
191 {
192 return -1;
193 }
194 if (blk1 > blk2)
195 {
196 return 1;
197 }
198 }
199 else
200 {
201 if (blk1->Size < blk2->Size)
202 {
203 return -1;
204 }
205 if (blk1->Size > blk2->Size)
206 {
207 return 1;
208 }
209 }
210 return 0;
211
212 }
213
214 int compare_value(PVOID value, PNODE p)
215 {
216 BLOCK_HDR* blk = CONTAINING_RECORD(p, BLOCK_HDR, Node);
217 ULONG v = *(PULONG)value;
218
219 if (v < blk->Size)
220 {
221 return -1;
222 }
223 if (v > blk->Size)
224 {
225 return 1;
226 }
227 return 0;
228 }
229
230 /* avl functions **********************************************************/
231
232 /* FIXME:
233 * The avl functions should be moved into a separate file.
234 */
235
236 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
237
/*
 * Insert node n into the AVL tree rooted at *root and restore the AVL
 * balance invariant.  Adapted from libavl.  'compare' must define a
 * total order (compare_node guarantees uniqueness via the tie breaker).
 */
void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
{
   PNODE y;   /* Top node to update balance factor, and parent. */
   PNODE p, q; /* Iterator, and parent. */
   PNODE w;   /* New root of rebalanced subtree. */
   int dir;   /* Direction to descend. */

   n->link[0] = n->link[1] = n->parent = NULL;
   n->balance = 0;

   /* Descend to the insertion point.  y tracks the deepest node on the
      path with a non-zero balance factor: the only place where a
      rotation can become necessary. */
   y = *root;
   for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
   {
      dir = compare(n, p) > 0;
      if (p->balance != 0)
      {
         y = p;
      }
   }

   /* Link n under its parent q, or make it the root of an empty tree. */
   n->parent = q;
   if (q != NULL)
   {
      q->link[dir] = n;
   }
   else
   {
      *root = n;
   }

   /* First node in the tree: nothing to rebalance. */
   if (*root == n)
   {
      return;
   }

   /* Update balance factors on the path from n up to (but excluding) y. */
   for (p = n; p != y; p = q)
   {
      q = p->parent;
      dir = q->link[0] != p;
      if (dir == 0)
      {
         q->balance--;
      }
      else
      {
         q->balance++;
      }
   }

   if (y->balance == -2)
   {
      /* y became left-heavy. */
      PNODE x = y->link[0];
      if (x->balance == -1)
      {
         /* Left-left case: single right rotation; x becomes subtree root. */
         w = x;
         y->link[0] = x->link[1];
         x->link[1] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
      else
      {
         /* Left-right case: double rotation; w = x->link[1] becomes
            the subtree root, balance factors depend on w's balance. */
         assert (x->balance == +1);
         w = x->link[1];
         x->link[1] = w->link[0];
         w->link[0] = x;
         y->link[0] = w->link[1];
         w->link[1] = y;
         if (w->balance == -1)
         {
            x->balance = 0;
            y->balance = +1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == +1| */
         {
            x->balance = -1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[1] != NULL)
         {
            x->link[1]->parent = x;
         }
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
   }
   else if (y->balance == +2)
   {
      /* y became right-heavy (mirror image of the above). */
      PNODE x = y->link[1];
      if (x->balance == +1)
      {
         /* Right-right case: single left rotation. */
         w = x;
         y->link[1] = x->link[0];
         x->link[0] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
      else
      {
         /* Right-left case: double rotation. */
         assert (x->balance == -1);
         w = x->link[0];
         x->link[0] = w->link[1];
         w->link[1] = x;
         y->link[1] = w->link[0];
         w->link[0] = y;
         if (w->balance == 1)
         {
            x->balance = 0;
            y->balance = -1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == -1| */
         {
            x->balance = +1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[0] != NULL)
         {
            x->link[0]->parent = x;
         }
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
   }
   else
   {
      /* |y->balance| <= 1: tree is still balanced, no rotation needed. */
      return;
   }
   /* Reattach the rebalanced subtree (rooted at w) to y's old parent. */
   if (w->parent != NULL)
   {
      w->parent->link[y != w->parent->link[0]] = w;
   }
   else
   {
      *root = w;
   }

   return;
}
404
/*
 * Remove 'item' from the AVL tree rooted at *root and restore the AVL
 * balance invariant.  Adapted from libavl.  'item' must actually be a
 * member of the tree.
 */
void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
{
   PNODE p;  /* Traverses tree to find node to delete. */
   PNODE q;  /* Parent of |p|. */
   int dir;  /* Side of |q| on which |p| is linked. */

   if (root == NULL || *root == NULL)
   {
      return ;
   }

   p = item;
   q = p->parent;
   if (q == NULL)
   {
      /* Deleting the root: treat the root pointer itself as the parent.
         NOTE: this relies on link[0] being the first member of NODE so
         that ((PNODE)root)->link[0] aliases *root. */
      q = (PNODE) root;
      dir = 0;
   }
   else
   {
      dir = compare(p, q) > 0;
   }

   if (p->link[1] == NULL)
   {
      /* Case 1: p has no right child; splice its left subtree in. */
      q->link[dir] = p->link[0];
      if (q->link[dir] != NULL)
      {
         q->link[dir]->parent = p->parent;
      }
   }
   else
   {
      PNODE r = p->link[1];
      if (r->link[0] == NULL)
      {
         /* Case 2: p's right child has no left child; r replaces p
            directly and rebalancing starts at r. */
         r->link[0] = p->link[0];
         q->link[dir] = r;
         r->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         r->balance = p->balance;
         q = r;
         dir = 1;
      }
      else
      {
         /* Case 3: replace p with its in-order successor s (the
            leftmost node of p's right subtree); rebalancing starts at
            s's old parent r. */
         PNODE s = r->link[0];
         while (s->link[0] != NULL)
         {
            s = s->link[0];
         }
         r = s->parent;
         r->link[0] = s->link[1];
         s->link[0] = p->link[0];
         s->link[1] = p->link[1];
         q->link[dir] = s;
         if (s->link[0] != NULL)
         {
            s->link[0]->parent = s;
         }
         s->link[1]->parent = s;
         s->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         s->balance = p->balance;
         q = r;
         dir = 0;
      }
   }

   /* Fully detach the removed node. */
   item->link[0] = item->link[1] = item->parent = NULL;
   item->balance = 0;

   /* Walk from the splice point back toward the root, updating balance
      factors and rotating wherever a subtree became unbalanced. */
   while (q != (PNODE) root)
   {
      PNODE y = q;

      if (y->parent != NULL)
      {
         q = y->parent;
      }
      else
      {
         q = (PNODE) root;
      }

      if (dir == 0)
      {
         /* y's left subtree shrank. */
         dir = q->link[0] != y;
         y->balance++;
         if (y->balance == +1)
         {
            /* Height unchanged; done. */
            break;
         }
         else if (y->balance == +2)
         {
            PNODE x = y->link[1];
            if (x->balance == -1)
            {
               /* Right-left double rotation; w becomes subtree root. */
               PNODE w;

               assert (x->balance == -1);
               w = x->link[0];
               x->link[0] = w->link[1];
               w->link[1] = x;
               y->link[1] = w->link[0];
               w->link[0] = y;
               if (w->balance == +1)
               {
                  x->balance = 0;
                  y->balance = -1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == -1| */
               {
                  x->balance = +1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[0] != NULL)
               {
                  x->link[0]->parent = x;
               }
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* Right-right single rotation. */
               y->link[1] = x->link[0];
               x->link[0] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  /* Subtree height unchanged; rebalancing stops here. */
                  x->balance = -1;
                  y->balance = +1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
      else
      {
         /* y's right subtree shrank (mirror image of the above). */
         dir = q->link[0] != y;
         y->balance--;
         if (y->balance == -1)
         {
            break;
         }
         else if (y->balance == -2)
         {
            PNODE x = y->link[0];
            if (x->balance == +1)
            {
               /* Left-right double rotation. */
               PNODE w;
               assert (x->balance == +1);
               w = x->link[1];
               x->link[1] = w->link[0];
               w->link[0] = x;
               y->link[0] = w->link[1];
               w->link[1] = y;
               if (w->balance == -1)
               {
                  x->balance = 0;
                  y->balance = +1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == +1| */
               {
                  x->balance = -1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[1] != NULL)
               {
                  x->link[1]->parent = x;
               }
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* Left-left single rotation. */
               y->link[0] = x->link[1];
               x->link[1] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  x->balance = +1;
                  y->balance = -1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
   }

}
644
645 PNODE avl_get_next(PNODE root, PNODE p)
646 {
647 PNODE q;
648 if (p->link[1])
649 {
650 p = p->link[1];
651 while(p->link[0])
652 {
653 p = p->link[0];
654 }
655 return p;
656 }
657 else
658 {
659 q = p->parent;
660 while (q && q->link[1] == p)
661 {
662 p = q;
663 q = q->parent;
664 }
665 if (q == NULL)
666 {
667 return NULL;
668 }
669 else
670 {
671 return q;
672 }
673 }
674 }
675
676 PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
677 {
678 PNODE p;
679 PNODE prev = NULL;
680 int cmp;
681
682 for (p = root; p != NULL;)
683 {
684 cmp = compare((PVOID)&size, p);
685 if (cmp < 0)
686 {
687 prev = p;
688 p = p->link[0];
689 }
690 else if (cmp > 0)
691 {
692 p = p->link[1];
693 }
694 else
695 {
696 while (p->link[0])
697 {
698 cmp = compare((PVOID)&size, p->link[0]);
699 if (cmp != 0)
700 {
701 break;
702 }
703 p = p->link[0];
704 }
705 return p;
706 }
707 }
708 return prev;
709 }
710
711 /* non paged pool functions ************************************************/
712
713 #ifdef TAG_STATISTICS_TRACKING
714 VOID
715 MiRemoveFromTagHashTable(BLOCK_HDR* block)
716 /*
717 * Remove a block from the tag hash table
718 */
719 {
720 if (block->Tag == 0)
721 {
722 return;
723 }
724
725 RemoveEntryList(&block->TagListEntry);
726 }
727
728 VOID
729 MiAddToTagHashTable(BLOCK_HDR* block)
730 /*
731 * Add a block to the tag hash table
732 */
733 {
734 ULONG hash;
735
736 if (block->Tag == 0)
737 {
738 return;
739 }
740
741 hash = block->Tag % TAG_HASH_TABLE_SIZE;
742
743 InsertHeadList(&tag_hash_table[hash], &block->TagListEntry);
744 }
745 #endif /* TAG_STATISTICS_TRACKING */
746
747 VOID
748 MiInitializeNonPagedPool(VOID)
749 {
750 #ifdef TAG_STATISTICS_TRACKING
751 ULONG i;
752 for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
753 {
754 InitializeListHead(&tag_hash_table[i]);
755 }
756 #endif
757 MiCurrentNonPagedPoolLength = 0;
758 KeInitializeSpinLock(&MmNpoolLock);
759 InitializeListHead(&UsedBlockListHead);
760 InitializeListHead(&AddressListHead);
761 FreeBlockListRoot = NULL;
762 #ifdef WHOLE_PAGE_ALLOCATIONS
763 RtlInitializeBitMap(&NonPagedPoolAllocMap, (PVOID)&NonPagedPoolAllocMapBuffer, MM_NONPAGED_POOL_SIZE / PAGE_SIZE);
764 RtlClearAllBits(&NonPagedPoolAllocMap);
765 NonPagedPoolAllocMapHint = 0;
766 #endif
767 }
768
769 #if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
770 VOID STATIC
771 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
772 {
773 CHAR c1, c2, c3, c4;
774
775 c1 = (CurrentTag >> 24) & 0xFF;
776 c2 = (CurrentTag >> 16) & 0xFF;
777 c3 = (CurrentTag >> 8) & 0xFF;
778 c4 = CurrentTag & 0xFF;
779
780 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
781 {
782 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
783 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
784 CurrentSize, CurrentSize / CurrentNrBlocks);
785 }
786 else
787 {
788 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
789 CurrentTag, CurrentNrBlocks, CurrentSize,
790 CurrentSize / CurrentNrBlocks);
791 }
792 }
#endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
794
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
/*
 * Dump per-tag statistics for all tracked nonpaged pool allocations.
 * When NewOnly is TRUE, only blocks not shown by a previous dump are
 * counted (each block's Dumped flag is set once reported).
 * NOTE(review): no pool lock is taken here — presumably callers invoke
 * this from a safe context; confirm before relying on it.
 */
{
#if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
   ULONG i;
   BLOCK_HDR* current;
   ULONG CurrentTag;
   ULONG CurrentNrBlocks;
   ULONG CurrentSize;
   ULONG TotalBlocks;
   ULONG TotalSize;
   LIST_ENTRY tmpListHead;
   PLIST_ENTRY current_entry;

   DbgPrint("******* Dumping non paging pool stats ******\n");
   TotalBlocks = 0;
   TotalSize = 0;
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tmpListHead);

      /* Group the chain by tag: repeatedly pick the tag of the first
         remaining entry, move all entries with that tag onto
         tmpListHead while accumulating their stats, then report. */
      while (!IsListEmpty(&tag_hash_table[i]))
      {
         CurrentTag = 0;

         current_entry = tag_hash_table[i].Flink;
         while (current_entry != &tag_hash_table[i])
         {
            current = CONTAINING_RECORD(current_entry, BLOCK_HDR, TagListEntry);
            current_entry = current_entry->Flink;
            if (CurrentTag == 0)
            {
               CurrentTag = current->Tag;
               CurrentNrBlocks = 0;
               CurrentSize = 0;
            }
            if (current->Tag == CurrentTag)
            {
               RemoveEntryList(&current->TagListEntry);
               InsertHeadList(&tmpListHead, &current->TagListEntry);
               if (!NewOnly || !current->Dumped)
               {
                  CurrentNrBlocks++;
                  TotalBlocks++;
                  CurrentSize += current->Size;
                  TotalSize += current->Size;
                  current->Dumped = TRUE;
               }
            }
         }
         if (CurrentTag != 0 && CurrentNrBlocks != 0)
         {
            MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
         }
      }
      /* Splice the entries back from tmpListHead onto the hash chain
         so the table is intact after the dump. */
      if (!IsListEmpty(&tmpListHead))
      {
         tag_hash_table[i].Flink = tmpListHead.Flink;
         tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
         tag_hash_table[i].Blink = tmpListHead.Blink;
         tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
      }
   }
   if (TotalBlocks != 0)
   {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
   }
   else
   {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
   }
   DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
            EiNrFreeBlocks, EiFreeNonPagedPool, EiNrFreeBlocks ? EiFreeNonPagedPool / EiNrFreeBlocks : 0);
   DbgPrint("***************** Dump Complete ***************\n");
#endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
}
873
874 VOID
875 MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
876 {
877 BLOCK_HDR* current;
878 PLIST_ENTRY current_entry;
879 KIRQL oldIrql;
880
881 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
882
883 DbgPrint("******* Dumping non paging pool contents ******\n");
884 current_entry = UsedBlockListHead.Flink;
885 while (current_entry != &UsedBlockListHead)
886 {
887 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
888 if (!NewOnly || !current->Dumped)
889 {
890 CHAR c1, c2, c3, c4;
891
892 c1 = (current->Tag >> 24) & 0xFF;
893 c2 = (current->Tag >> 16) & 0xFF;
894 c3 = (current->Tag >> 8) & 0xFF;
895 c4 = current->Tag & 0xFF;
896
897 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
898 {
899 DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
900 current->Size, current->Tag, c4, c3, c2, c1,
901 current->Caller);
902 }
903 else
904 {
905 DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
906 current->Size, current->Tag, current->Caller);
907 }
908 current->Dumped = TRUE;
909 }
910 current_entry = current_entry->Flink;
911 }
912 DbgPrint("***************** Dump Complete ***************\n");
913 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
914 }
915
916 #ifndef WHOLE_PAGE_ALLOCATIONS
917
918 #ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 * NOTE(review): this ENABLE_VALIDATE_POOL-only code references
 * MiFreeBlockListHead and MiNrFreeBlocks, which are not declared in
 * this file — free blocks now live in the AVL tree (FreeBlockListRoot).
 * It looks stale and would not compile with ENABLE_VALIDATE_POOL
 * defined; confirm before enabling.
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = MiFreeBlockListHead.Flink;
   while (current_entry != &MiFreeBlockListHead)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      base_addr = (PVOID)current;

      /* Each free block must still carry the free magic value. */
      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* The block must lie entirely inside the committed pool range. */
      if (base_addr < MiNonPagedPoolStart ||
          MiNonPagedPoolStart + current->Size > MiNonPagedPoolStart + MiCurrentNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  MiNonPagedPoolStart+MiCurrentNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* More entries than the recorded count implies a cycle or a
         corrupted link. */
      blocks_seen++;
      if (blocks_seen > MiNrFreeBlocks)
      {
         DbgPrint("Too many blocks on free list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* Forward/backward links must agree. */
      if (current->ListEntry.Flink != &MiFreeBlockListHead &&
          current->ListEntry.Flink->Blink != &current->ListEntry)
      {
         DbgPrint("%s:%d:Break in list (current %x next %x "
                  "current->next->previous %x)\n",
                  __FILE__,__LINE__,current, current->ListEntry.Flink,
                  current->ListEntry.Flink->Blink);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }
}
971
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * Checks each block's magic, pool-range containment, the used-block
 * count, and forward/backward link consistency; hangs or bugchecks on
 * any corruption.  Only compiled under ENABLE_VALIDATE_POOL.
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      base_addr = (PVOID)current;

      if (current->Magic != BLOCK_HDR_USED_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if (base_addr < MiNonPagedPoolStart ||
          (base_addr+current->Size) >
          MiNonPagedPoolStart+MiCurrentNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         for(;;);
      }
      /* More entries than EiNrUsedBlocks implies a cycle or corrupted
         link. */
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
      {
         DbgPrint("Too many blocks on used list\n");
         for(;;);
      }
      if (current->ListEntry.Flink != &UsedBlockListHead &&
          current->ListEntry.Flink->Blink != &current->ListEntry)
      {
         DbgPrint("Break in list (current %x next %x)\n",
                  current, current->ListEntry.Flink);
         for(;;);
      }

      current_entry = current_entry->Flink;
   }
}
1019
static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *       blk = block to check
 * NOTE: Bug checks if duplicates are found
 * NOTE(review): references MiFreeBlockListHead, which is not declared
 * in this file (free blocks now live in the AVL tree); stale
 * ENABLE_VALIDATE_POOL-only code — confirm before enabling.
 */
{
   char* base = (char*)blk;
   /* Note the stray unary '+' before sizeof; harmless, value is
      header + payload end of blk. */
   char* last = ((char*)blk) + +sizeof(BLOCK_HDR) + blk->Size;
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;

   /* Scan the free list for any block overlapping [base, last). */
   current_entry = MiFreeBlockListHead.Flink;
   while (current_entry != &MiFreeBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* Another block starting inside blk's extent. */
      if ( (char*)current > base && (char*)current < last )
      {
         DbgPrint("intersecting blocks on list\n");
         for(;;);
      }
      /* A block starting before blk but extending into it. */
      if ( (char*)current < base &&
           ((char*)current + current->Size + sizeof(BLOCK_HDR))
           > base )
      {
         DbgPrint("intersecting blocks on list\n");
         for(;;);
      }

      current_entry = current_entry->Flink;
   }

   /* Same overlap checks against the used list. */
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if ( (char*)current > base && (char*)current < last )
      {
         DbgPrint("intersecting blocks on list\n");
         for(;;);
      }
      if ( (char*)current < base &&
           ((char*)current + current->Size + sizeof(BLOCK_HDR))
           > base )
      {
         DbgPrint("intersecting blocks on list\n");
         for(;;);
      }

      current_entry = current_entry->Flink;
   }

}
1083
static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 * Validates both lists, then checks every free and used block for
 * overlaps with any other block.  O(n^2); debug builds only
 * (ENABLE_VALIDATE_POOL).
 * NOTE(review): depends on MiFreeBlockListHead, which no longer exists
 * in this file — see validate_free_list.
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;

   validate_free_list();
   validate_used_list();

   current_entry = MiFreeBlockListHead.Flink;
   while (current_entry != &MiFreeBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      check_duplicates(current);
      current_entry = current_entry->Flink;
   }
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      check_duplicates(current);
      current_entry = current_entry->Flink;
   }
}
1110 #endif
1111
1112 #if 0
STATIC VOID
free_pages(BLOCK_HDR* blk)
/*
 * Disabled stub (inside #if 0): intended to release whole pages fully
 * contained within a freed block back to the system.  Only the
 * early-out for blocks that span no complete page was ever written.
 */
{
   ULONG start;
   ULONG end;
   ULONG i;

   start = (ULONG)blk;
   end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

   /*
    * If the block doesn't contain a whole page then there is nothing to do
    */
   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
   {
      return;
   }
}
1131 #endif
1132
1133 static void remove_from_used_list(BLOCK_HDR* current)
1134 {
1135 RemoveEntryList(&current->ListEntry);
1136 EiUsedNonPagedPool -= current->Size;
1137 EiNrUsedBlocks--;
1138 }
1139
1140 static void remove_from_free_list(BLOCK_HDR* current)
1141 {
1142 DPRINT("remove_from_free_list %d\n", current->Size);
1143
1144 avl_remove(&FreeBlockListRoot, &current->Node, compare_node);
1145
1146 EiFreeNonPagedPool -= current->Size;
1147 EiNrFreeBlocks--;
1148 DPRINT("remove_from_free_list done\n");
1149 #ifdef DUMP_AVL
1150 DumpFreeBlockTree();
1151 #endif
1152 }
1153
static void
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 * Coalesces blk with its address-order neighbours when they are free
 * and physically adjacent, then inserts the (possibly merged) block
 * into the free-block AVL tree.  Caller holds the pool lock.
 * NOTE: pointer arithmetic on PVOID is a GCC extension (treated as
 * char*).
 */
{
   BLOCK_HDR* current;

   DPRINT("add_to_free_list (%d)\n", blk->Size);

   EiNrFreeBlocks++;

   /* Try to merge with the preceding block in address order. */
   if (blk->AddressList.Blink != &AddressListHead)
   {
      current = CONTAINING_RECORD(blk->AddressList.Blink, BLOCK_HDR, AddressList);
      if (current->Magic == BLOCK_HDR_FREE_MAGIC &&
          (PVOID)current + current->Size + sizeof(BLOCK_HDR) == (PVOID)blk)
      {
         CHECKPOINT;
         remove_from_free_list(current);
         RemoveEntryList(&blk->AddressList);
         current->Size = current->Size + sizeof(BLOCK_HDR) + blk->Size;
         /* NOTE(review): marked USED here although the block is being
            freed; the final magic is set to FREE below, so this only
            matters transiently — presumably to satisfy intermediate
            checks.  Confirm intent. */
         current->Magic = BLOCK_HDR_USED_MAGIC;
         /* Poison the swallowed header to catch stale references. */
         memset(blk, 0xcc, sizeof(BLOCK_HDR));
         blk = current;
      }
   }

   /* Try to merge with the following block in address order. */
   if (blk->AddressList.Flink != &AddressListHead)
   {
      current = CONTAINING_RECORD(blk->AddressList.Flink, BLOCK_HDR, AddressList);
      if (current->Magic == BLOCK_HDR_FREE_MAGIC &&
          (PVOID)blk + blk->Size + sizeof(BLOCK_HDR) == (PVOID)current)
      {
         CHECKPOINT;
         remove_from_free_list(current);
         RemoveEntryList(&current->AddressList);
         blk->Size = blk->Size + sizeof(BLOCK_HDR) + current->Size;
         memset(current, 0xcc, sizeof(BLOCK_HDR));
      }
   }

   DPRINT("%d\n", blk->Size);
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   EiFreeNonPagedPool += blk->Size;
   avl_insert(&FreeBlockListRoot, &blk->Node, compare_node);

   DPRINT("add_to_free_list done\n");
#ifdef DUMP_AVL
   DumpFreeBlockTree();
#endif
}
1206
1207 static void add_to_used_list(BLOCK_HDR* blk)
1208 /*
1209 * FUNCTION: add the block to the used list (internal)
1210 */
1211 {
1212 InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
1213 EiUsedNonPagedPool += blk->Size;
1214 EiNrUsedBlocks++;
1215 }
1216
1217 inline static void* block_to_address(BLOCK_HDR* blk)
1218 /*
1219 * FUNCTION: Translate a block header address to the corresponding block
1220 * address (internal)
1221 */
1222 {
1223 return ( (void *) ((char*)blk + sizeof(BLOCK_HDR)) );
1224 }
1225
1226 inline static BLOCK_HDR* address_to_block(void* addr)
1227 {
1228 return (BLOCK_HDR *)
1229 ( ((char*)addr) - sizeof(BLOCK_HDR) );
1230 }
1231
static BLOCK_HDR* lookup_block(unsigned int size)
/*
 * FUNCTION: Find a free block able to satisfy an allocation of 'size'
 * bytes using the size-keyed AVL tree (best fit).  Requests of a page
 * or more additionally need room to round the user address up to a
 * page boundary (done later in take_block).
 * RETURNS: The chosen free block header, or NULL if nothing fits.
 */
{
   BLOCK_HDR* current;
   BLOCK_HDR* best = NULL;
   ULONG new_size;
   PVOID block, block_boundary;
   PNODE p;

   DPRINT("lookup_block %d\n", size);

   if (size < PAGE_SIZE)
   {
      /* Small request: the first block of equal or greater size fits. */
      p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
      if (p)
      {
         best = CONTAINING_RECORD(p, BLOCK_HDR, Node);
      }
   }
   else
   {
      p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);

      while(p)
      {
         current = CONTAINING_RECORD(p, BLOCK_HDR, Node);
         block = block_to_address(current);
         block_boundary = (PVOID)PAGE_ROUND_UP((ULONG)block);
         /* Bytes needed including the slack up to the next page
            boundary where the user address will be placed. */
         new_size = (ULONG)block_boundary - (ULONG)block + size;
         if (new_size != size && (ULONG)block_boundary - (ULONG)block < sizeof(BLOCK_HDR))
         {
            /* Not enough slack before the boundary to fit the new
               header; take_block will skip to the next page. */
            new_size += PAGE_SIZE;
         }
         if (current->Size >= new_size &&
             (best == NULL || current->Size < best->Size))
         {
            best = current;
         }
         /* A candidate this much larger than the request is always
            usable; stop scanning. */
         if (best && current->Size >= size + PAGE_SIZE + 2 * sizeof(BLOCK_HDR))
         {
            break;
         }
         p = avl_get_next(FreeBlockListRoot, p);

      }
   }
   DPRINT("lookup_block done %d\n", best ? best->Size : 0);
   return best;
}
1280
static void* take_block(BLOCK_HDR* current, unsigned int size,
                        ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocate a used block of least 'size' from the specified
 * free block
 * For requests of a page or more, the user address is first aligned to
 * a page boundary by carving a new header out of the front of the free
 * block.  If the chosen block is significantly larger than needed the
 * tail is split off and returned to the free tree.
 * RETURNS: The address of the created memory block
 */
{
   BLOCK_HDR* blk;
   BOOL Removed = FALSE;

   DPRINT("take_block\n");

   if (size >= PAGE_SIZE)
   {
      /* Position a header so the user address falls on a page
         boundary (lookup_block guaranteed the block is big enough). */
      blk = address_to_block((PVOID)PAGE_ROUND_UP(block_to_address (current)));
      if (blk != current)
      {
         if ((ULONG)blk - (ULONG)current < sizeof(BLOCK_HDR))
         {
            /* No room for the new header before the boundary; use the
               next page.  NOTE: cast-as-lvalue is a GCC extension. */
            (ULONG)blk += PAGE_SIZE;
         }
         assert((ULONG)blk - (ULONG)current + size <= current->Size && (ULONG)blk - (ULONG)current >= sizeof(BLOCK_HDR));

         /* Split: blk takes the tail of current; current shrinks and
            goes back on the free tree (its key changed, so it must be
            removed and reinserted). */
         memset(blk, 0, sizeof(BLOCK_HDR));
         blk->Magic = BLOCK_HDR_USED_MAGIC;
         blk->Size = current->Size - ((ULONG)blk - (ULONG)current);
         remove_from_free_list(current);
         current->Size -= (blk->Size + sizeof(BLOCK_HDR));
         /* Link blk after current on the address-ordered list. */
         blk->AddressList.Flink = current->AddressList.Flink;
         blk->AddressList.Flink->Blink = &blk->AddressList;
         blk->AddressList.Blink = &current->AddressList;
         current->AddressList.Flink = &blk->AddressList;
         add_to_free_list(current);
         Removed = TRUE;
         current = blk;
      }
   }
   if (Removed == FALSE)
   {
      remove_from_free_list(current);
   }

   /*
    * If the block is much bigger than required then split it and
    * return a pointer to the allocated section. If the difference
    * between the sizes is marginal it makes no sense to have the
    * extra overhead
    */
   if (current->Size > size + sizeof(BLOCK_HDR))
   {
      BLOCK_HDR* free_blk;

      /*
       * Replace the bigger block with a smaller block in the
       * same position in the list
       */
      free_blk = (BLOCK_HDR *)(((char*)current)
                               + sizeof(BLOCK_HDR) + size);

      free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);
      current->Size=size;
      /* Link the tail block after current on the address list. */
      free_blk->AddressList.Flink = current->AddressList.Flink;
      free_blk->AddressList.Flink->Blink = &free_blk->AddressList;
      free_blk->AddressList.Blink = &current->AddressList;
      current->AddressList.Flink = &free_blk->AddressList;
      current->Magic = BLOCK_HDR_USED_MAGIC;
      free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
      add_to_free_list(free_blk);
      add_to_used_list(current);
      current->Tag = Tag;
      current->Caller = Caller;
      current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
      MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */
      VALIDATE_POOL;
      return(block_to_address(current));
   }

   /*
    * Otherwise allocate the whole block
    */

   current->Magic = BLOCK_HDR_USED_MAGIC;
   current->Tag = Tag;
   current->Caller = Caller;
   current->Dumped = FALSE;
   add_to_used_list(current);
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

   VALIDATE_POOL;
   return(block_to_address(current));
}
1377
static void* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
 * bytes
 * ARGUMENTS:
 *       size   = Minimum usable size (in bytes) the new free region must
 *                be able to satisfy
 *       Tag    = Pool tag to record on the block handed back to the caller
 *       Caller = Return address of the original allocator, for debugging
 * RETURNS: Address of a newly allocated block of 'size' bytes, or NULL
 *          (NB: several failure paths bugcheck instead of returning)
 */
{
   /* Pages needed to hold the block plus its header. */
   ULONG nr_pages = PAGE_ROUND_UP(size + sizeof(BLOCK_HDR)) / PAGE_SIZE;
   ULONG start;
   BLOCK_HDR* blk=NULL;
   BLOCK_HDR* current;
   ULONG i;
   KIRQL oldIrql;
   NTSTATUS Status;
   PVOID block = NULL;
   PLIST_ENTRY current_entry;

   /*
    * For page-sized-or-larger requests add one extra page; presumably this
    * gives take_block enough slack to page-align the returned block
    * (see the PAGE_ROUND_UP path in take_block) -- TODO confirm.
    */
   if (size >= PAGE_SIZE)
   {
      nr_pages ++;
   }

   /*
    * Reserve the address range at the current end of the pool under the
    * pool lock, then drop the lock before doing the (potentially waiting)
    * page allocation and mapping below.
    */
   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
   start = (ULONG)MiNonPagedPoolStart + MiCurrentNonPagedPoolLength;
   if (MiCurrentNonPagedPoolLength + nr_pages * PAGE_SIZE > MiNonPagedPoolLength)
   {
      DbgPrint("CRITICAL: Out of non-paged pool space\n");
      KEBUGCHECK(0);
   }
   MiCurrentNonPagedPoolLength += nr_pages * PAGE_SIZE;
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);

   DPRINT("growing heap for block size %d, ",size);
   DPRINT("start %x\n",start);

   /* Back the reserved range with physical pages, one at a time. */
   for (i=0;i<nr_pages;i++)
   {
      PHYSICAL_ADDRESS Page;
      /* FIXME: Check whether we can really wait here. */
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
      if (!NT_SUCCESS(Status))
      {
         /* NOTE(review): the range reserved above is leaked on this path,
            but KEBUGCHECK halts the system so the return is unreachable. */
         KEBUGCHECK(0);
         return(NULL);
      }
      Status = MmCreateVirtualMapping(NULL,
                                      (PVOID)(start + (i*PAGE_SIZE)),
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      Page,
                                      TRUE);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
   }

   /* Build a single free block header covering the whole new range and
      fill the usable area with 0xcc to catch use of uninitialized pool. */
   blk = (struct _BLOCK_HDR *)start;
   memset(blk, 0, sizeof(BLOCK_HDR));
   blk->Size = (nr_pages * PAGE_SIZE) - sizeof(BLOCK_HDR);
   memset(block_to_address(blk), 0xcc, blk->Size);

   /*
    * Re-take the lock and insert the new block into the address-ordered
    * list, scanning backwards since the new range is normally at the end
    * of the pool.
    */
   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
   current_entry = AddressListHead.Blink;
   while (current_entry != &AddressListHead)
   {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, AddressList);
      if ((PVOID)current + current->Size < (PVOID)blk)
      {
         InsertHeadList(current_entry, &blk->AddressList);
         break;
      }
      current_entry = current_entry->Blink;
   }
   if (current_entry == &AddressListHead)
   {
      /* New block precedes everything (or the list was empty). */
      InsertHeadList(&AddressListHead, &blk->AddressList);
   }
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   add_to_free_list(blk);
   /*
    * Look the block up again rather than using 'blk' directly: the lock
    * was dropped while mapping, so another thread may have consumed or
    * coalesced the new space already.  NULL is returned in that case.
    */
   blk = lookup_block(size);
   if (blk)
   {
      block = take_block(blk, size, Tag, Caller);
      VALIDATE_POOL;
   }
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   return block;
}
1466
1467 #endif /* not WHOLE_PAGE_ALLOCATIONS */
1468
1469 VOID STDCALL ExFreeNonPagedPool (PVOID block)
1470 /*
1471 * FUNCTION: Releases previously allocated memory
1472 * ARGUMENTS:
1473 * block = block to free
1474 */
1475 {
1476 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
1477 KIRQL oldIrql;
1478
1479 if (block == NULL)
1480 {
1481 return;
1482 }
1483
1484 DPRINT("freeing block %x\n",blk);
1485
1486 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
1487 ((PULONG)&block)[-1]);
1488
1489 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1490
1491 ExFreeWholePageBlock(block);
1492 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1493
1494 #else /* not WHOLE_PAGE_ALLOCATIONS */
1495
1496 BLOCK_HDR* blk=address_to_block(block);
1497 KIRQL oldIrql;
1498
1499 if (block == NULL)
1500 {
1501 return;
1502 }
1503
1504 DPRINT("freeing block %x\n",blk);
1505
1506 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
1507 ((PULONG)&block)[-1]);
1508
1509 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1510
1511 VALIDATE_POOL;
1512
1513 if (blk->Magic != BLOCK_HDR_USED_MAGIC)
1514 {
1515 if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
1516 {
1517 DbgPrint("ExFreePool of already freed address %x\n", block);
1518 }
1519 else
1520 {
1521 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1522 block, blk->Magic);
1523 }
1524 KEBUGCHECK(0);
1525 return;
1526 }
1527
1528 memset(block, 0xcc, blk->Size);
1529
1530 #ifdef TAG_STATISTICS_TRACKING
1531 MiRemoveFromTagHashTable(blk);
1532 #endif /* TAG_STATISTICS_TRACKING */
1533 remove_from_used_list(blk);
1534 blk->Tag = 0;
1535 blk->Caller = NULL;
1536 blk->TagListEntry.Flink = blk->TagListEntry.Blink = NULL;
1537 blk->Magic = BLOCK_HDR_FREE_MAGIC;
1538 add_to_free_list(blk);
1539 VALIDATE_POOL;
1540 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1541
1542 #endif /* WHOLE_PAGE_ALLOCATIONS */
1543 }
1544
1545 PVOID STDCALL
1546 ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
1547 {
1548 #ifdef WHOLE_PAGE_ALLOCATIONS
1549 PVOID block;
1550 KIRQL oldIrql;
1551
1552 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1553 Size,Caller);
1554
1555 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1556
1557 /*
1558 * accomodate this useful idiom
1559 */
1560 if (Size == 0)
1561 {
1562 POOL_TRACE("= NULL\n");
1563 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1564 return(NULL);
1565 }
1566
1567 block = ExAllocateWholePageBlock(Size);
1568 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1569 return(block);
1570
1571 #else /* not WHOLE_PAGE_ALLOCATIONS */
1572 PVOID block;
1573 BLOCK_HDR* best = NULL;
1574 KIRQL oldIrql;
1575
1576 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1577 Size,Caller);
1578
1579 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1580
1581 VALIDATE_POOL;
1582
1583 #if 0
1584 /* after some allocations print the npaged pool stats */
1585 #ifdef TAG_STATISTICS_TRACKING
1586 {
1587 static ULONG counter = 0;
1588 if (counter++ % 100000 == 0)
1589 {
1590 MiDebugDumpNonPagedPoolStats(FALSE);
1591 }
1592 }
1593 #endif
1594 #endif
1595 /*
1596 * accomodate this useful idiom
1597 */
1598 if (Size == 0)
1599 {
1600 POOL_TRACE("= NULL\n");
1601 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1602 return(NULL);
1603 }
1604 /* Make the size dword alligned, this makes the block dword alligned */
1605 Size = ROUND_UP(Size, 4);
1606 /*
1607 * Look for an already created block of sufficent size
1608 */
1609 best = lookup_block(Size);
1610 if (best == NULL)
1611 {
1612 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1613 block = grow_kernel_pool(Size, Tag, Caller);
1614 if (block == NULL)
1615 {
1616 DPRINT1("%d\n", Size);
1617 DumpFreeBlockTree();
1618 }
1619 assert(block != NULL);
1620 memset(block,0,Size);
1621 }
1622 else
1623 {
1624 block=take_block(best, Size, Tag, Caller);
1625 VALIDATE_POOL;
1626 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1627 memset(block,0,Size);
1628 }
1629 return(block);
1630 #endif /* WHOLE_PAGE_ALLOCATIONS */
1631 }
1632
1633 #ifdef WHOLE_PAGE_ALLOCATIONS
1634
1635 PVOID STDCALL
1636 ExAllocateWholePageBlock(ULONG Size)
1637 {
1638 PVOID Address;
1639 PHYSICAL_ADDRESS Page;
1640 ULONG i;
1641 ULONG NrPages;
1642 ULONG Base;
1643
1644 NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;
1645
1646 Base = RtlFindClearBitsAndSet(&NonPagedPoolAllocMap, NrPages + 1, NonPagedPoolAllocMapHint);
1647 if (NonPagedPoolAllocMapHint == Base)
1648 {
1649 NonPagedPoolAllocMapHint += (NrPages + 1);
1650 }
1651 Address = MiNonPagedPoolStart + Base * PAGE_SIZE;
1652
1653 for (i = 0; i < NrPages; i++)
1654 {
1655 Page = MmAllocPage(MC_NPPOOL, 0);
1656 if (Page.QuadPart == 0LL)
1657 {
1658 KEBUGCHECK(0);
1659 }
1660 MmCreateVirtualMapping(NULL,
1661 Address + (i * PAGE_SIZE),
1662 PAGE_READWRITE | PAGE_SYSTEM,
1663 Page,
1664 TRUE);
1665 }
1666
1667 MiCurrentNonPagedPoolLength = max(MiCurrentNonPagedPoolLength, (Base + NrPages) * PAGE_SIZE);
1668 return((PVOID)((PUCHAR)Address + (NrPages * PAGE_SIZE) - Size));
1669 }
1670
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr)
/*
 * FUNCTION: Releases a block allocated with ExAllocateWholePageBlock,
 *           unmapping its pages and clearing the allocation bitmap bits
 * ARGUMENTS:
 *       Addr = Block address previously returned by
 *              ExAllocateWholePageBlock (may point inside the first page,
 *              since blocks are aligned to the END of their page run)
 */
{
  ULONG Base;

  /* Sanity check: the address must lie inside the pool region. */
  if (Addr < MiNonPagedPoolStart ||
      Addr >= (MiNonPagedPoolStart + MiCurrentNonPagedPoolLength))
    {
      DbgPrint("Block %x found outside pool area\n", Addr);
      KEBUGCHECK(0);
    }
  /* Page index of the first page of the block (division rounds down,
     which also handles a non-page-aligned Addr). */
  Base = (Addr - MiNonPagedPoolStart) / PAGE_SIZE;
  /* Move the search hint back so freed slots are found again quickly. */
  NonPagedPoolAllocMapHint = min(NonPagedPoolAllocMapHint, Base);
  /* Walk forward unmapping pages until the (unmapped) guard page stops
     the loop.
     NOTE(review): the allocator sets NrPages + 1 bits (including the
     guard page), but this loop only clears bits for mapped pages, so the
     guard bit appears never to be released -- verify against the
     allocation-map invariants before changing. */
  while (MmIsPagePresent(NULL, Addr))
    {
      MmDeleteVirtualMapping(NULL,
			     Addr,
			     TRUE,
			     NULL,
			     NULL);
      RtlClearBits(&NonPagedPoolAllocMap, Base, 1);
      Base++;
      Addr += PAGE_SIZE;
    }
}
1696
1697 #endif /* WHOLE_PAGE_ALLOCATIONS */
1698
1699 /* EOF */