Merge 13159:13510 from trunk
[reactos.git] / reactos / ntoskrnl / mm / npool.c
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 *
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 * Iwan Fatahi (i_fatahi@hotmail.com)
10 * Robert Bergkvist (fragdance@hotmail.com)
11 */
12
13 /* INCLUDES ****************************************************************/
14
15 #include <ntoskrnl.h>
16 #define NDEBUG
17 #include <internal/debug.h>
18
19 #ifdef ENABLE_VALIDATE_POOL
20 #define VALIDATE_POOL validate_kernel_pool()
21 #else
22 #define VALIDATE_POOL
23 #endif
24
25 #if 0
26 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
27 #else
28 #if defined(__GNUC__)
29 #define POOL_TRACE(args...)
30 #else
31 #define POOL_TRACE
32 #endif /* __GNUC__ */
33 #endif
34
35 /* avl types ****************************************************************/
36
37 /* FIXME:
38 * This declarations should be moved into a separate header file.
39 */
40
/*
 * AVL tree node. One of these is embedded (via BLOCK_HDR.Free.Node) in
 * the header of every free pool block; the tree orders free blocks by
 * size (see compare_node).
 */
typedef struct _NODE
{
   struct _NODE* link[2];   /* link[0] = left child, link[1] = right child */
   struct _NODE* parent;    /* NULL for the tree root */
   signed char balance;     /* AVL balance factor: -1, 0 or +1 when consistent */
}
NODE, *PNODE;
48
49 /* TYPES *******************************************************************/
50
51 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
52 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
53
/*
 * Header present at the start of every pool block (internal use only).
 * The Used/Free union overlays bookkeeping for the two block states;
 * Magic says which arm is currently valid.
 */
typedef struct _BLOCK_HDR
{
   ULONG Magic;                  /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
   ULONG Size;                   /* usable bytes following the header (header itself excluded) */
   struct _BLOCK_HDR* previous;  /* physically preceding block in the pool, if any */
   union
   {
      struct
      {
         LIST_ENTRY ListEntry;      /* link in UsedBlockListHead */
         ULONG Tag;                 /* pool tag; 0 means untagged */
         PVOID Caller;              /* allocator's return address, for leak dumps */
         LIST_ENTRY TagListEntry;   /* link in the per-tag hash bucket */
         BOOLEAN Dumped;            /* TRUE once shown by the debug dump routines */
      }
      Used;
      struct
      {
         NODE Node;                 /* node in the free-block AVL tree */
      }
      Free;
   };
}
BLOCK_HDR;
81
82 #define BLOCK_HDR_SIZE ROUND_UP(sizeof(BLOCK_HDR), MM_POOL_ALIGNMENT)
83
84 PVOID STDCALL
85 ExAllocateWholePageBlock(ULONG Size);
86 VOID STDCALL
87 ExFreeWholePageBlock(PVOID Addr);
88
89 /* GLOBALS *****************************************************************/
90
91 extern PVOID MiNonPagedPoolStart;
92 extern ULONG MiNonPagedPoolLength;
93
94 /*
95 * Head of the list of free blocks
96 */
97 static PNODE FreeBlockListRoot = NULL;
98
99 /*
100 * Head of the list of in use block
101 */
102 static LIST_ENTRY UsedBlockListHead;
103
104 static LIST_ENTRY AddressListHead;
105
106 #ifndef WHOLE_PAGE_ALLOCATIONS
107 /*
108 * Count of free blocks
109 */
110 static ULONG EiNrFreeBlocks = 0;
111
112 /*
113 * Count of used blocks
114 */
115 static ULONG EiNrUsedBlocks = 0;
116 #endif
117
118 /*
119 * Lock that protects the non-paged pool data structures
120 */
121 static KSPIN_LOCK MmNpoolLock;
122
123 /*
124 * Total memory used for free nonpaged pool blocks
125 */
126 ULONG EiFreeNonPagedPool = 0;
127
128 /*
129 * Total memory used for nonpaged pool blocks
130 */
131 ULONG EiUsedNonPagedPool = 0;
132
133 /* Total quota for Non Paged Pool */
134 ULONG MmTotalNonPagedPoolQuota = 0;
135
136 /*
137 * Allocate a range of memory in the nonpaged pool
138 */
139 PVOID
140 MiAllocNonPagedPoolRegion(unsigned int nr_pages);
141
142 VOID
143 MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);
144
145 #ifdef TAG_STATISTICS_TRACKING
146 #define TAG_HASH_TABLE_SIZE (1024)
147 static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
148 #endif /* TAG_STATISTICS_TRACKING */
149
150 #ifdef WHOLE_PAGE_ALLOCATIONS
151 static RTL_BITMAP NonPagedPoolAllocMap;
152 static ULONG NonPagedPoolAllocMapHint;
153 static ULONG MiCurrentNonPagedPoolLength = 0;
154 #else
155 static PULONG MiNonPagedPoolAllocMap;
156 static ULONG MiNonPagedPoolNrOfPages;
157 #endif /* WHOLE_PAGE_ALLOCATIONS */
158
159 /* avl helper functions ****************************************************/
160
161 void DumpFreeBlockNode(PNODE p)
162 {
163 static int count = 0;
164 BLOCK_HDR* blk;
165
166 count++;
167
168 if (p)
169 {
170 DumpFreeBlockNode(p->link[0]);
171 blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
172 DbgPrint("%08x %8d (%d)\n", blk, blk->Size, count);
173 DumpFreeBlockNode(p->link[1]);
174 }
175
176 count--;
177 }
178 void DumpFreeBlockTree(void)
179 {
180 DbgPrint("--- Begin tree ------------------\n");
181 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, BLOCK_HDR, Free.Node));
182 DumpFreeBlockNode(FreeBlockListRoot);
183 DbgPrint("--- End tree --------------------\n");
184 }
185
186 int compare_node(PNODE p1, PNODE p2)
187 {
188 BLOCK_HDR* blk1 = CONTAINING_RECORD(p1, BLOCK_HDR, Free.Node);
189 BLOCK_HDR* blk2 = CONTAINING_RECORD(p2, BLOCK_HDR, Free.Node);
190
191 if (blk1->Size == blk2->Size)
192 {
193 if (blk1 < blk2)
194 {
195 return -1;
196 }
197 if (blk1 > blk2)
198 {
199 return 1;
200 }
201 }
202 else
203 {
204 if (blk1->Size < blk2->Size)
205 {
206 return -1;
207 }
208 if (blk1->Size > blk2->Size)
209 {
210 return 1;
211 }
212 }
213 return 0;
214
215 }
216
217 int compare_value(PVOID value, PNODE p)
218 {
219 BLOCK_HDR* blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
220 ULONG v = *(PULONG)value;
221
222 if (v < blk->Size)
223 {
224 return -1;
225 }
226 if (v > blk->Size)
227 {
228 return 1;
229 }
230 return 0;
231 }
232
233 /* avl functions **********************************************************/
234
235 /* FIXME:
236 * The avl functions should be moved into a separate file.
237 */
238
239 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
240
/*
 * Inserts node n into the AVL tree at *root, ordered by compare, and
 * restores the AVL balance invariant with at most one single or double
 * rotation. Adapted from libavl; the parent pointers are maintained in
 * addition to the classic link[]/balance fields.
 */
void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
{
   PNODE y; /* Top node to update balance factor, and parent. */
   PNODE p, q; /* Iterator, and parent. */
   PNODE w; /* New root of rebalanced subtree. */
   int dir = 0; /* Direction to descend. */

   n->link[0] = n->link[1] = n->parent = NULL;
   n->balance = 0;

   /* Descend to the insertion point, remembering in y the deepest node
    * with a non-zero balance factor (the rebalancing point). */
   y = *root;
   for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
   {
      dir = compare(n, p) > 0;
      if (p->balance != 0)
      {
         y = p;
      }
   }

   n->parent = q;
   if (q != NULL)
   {
      q->link[dir] = n;
   }
   else
   {
      *root = n;
   }

   /* A new root needs no rebalancing. */
   if (*root == n)
   {
      return;
   }

   /* Update balance factors on the path from n up to (but excluding) y. */
   for (p = n; p != y; p = q)
   {
      q = p->parent;
      dir = q->link[0] != p;
      if (dir == 0)
      {
         q->balance--;
      }
      else
      {
         q->balance++;
      }
   }

   if (y->balance == -2)
   {
      /* Left-heavy: single right rotation or left-right double rotation. */
      PNODE x = y->link[0];
      if (x->balance == -1)
      {
         /* Single right rotation. */
         w = x;
         y->link[0] = x->link[1];
         x->link[1] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
      else
      {
         /* Left-right double rotation. */
         ASSERT(x->balance == +1);
         w = x->link[1];
         x->link[1] = w->link[0];
         w->link[0] = x;
         y->link[0] = w->link[1];
         w->link[1] = y;
         if (w->balance == -1)
         {
            x->balance = 0;
            y->balance = +1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == +1| */
         {
            x->balance = -1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[1] != NULL)
         {
            x->link[1]->parent = x;
         }
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
   }
   else if (y->balance == +2)
   {
      /* Right-heavy: mirror image of the case above. */
      PNODE x = y->link[1];
      if (x->balance == +1)
      {
         /* Single left rotation. */
         w = x;
         y->link[1] = x->link[0];
         x->link[0] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
      else
      {
         /* Right-left double rotation. */
         ASSERT(x->balance == -1);
         w = x->link[0];
         x->link[0] = w->link[1];
         w->link[1] = x;
         y->link[1] = w->link[0];
         w->link[0] = y;
         if (w->balance == 1)
         {
            x->balance = 0;
            y->balance = -1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == -1| */
         {
            x->balance = +1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[0] != NULL)
         {
            x->link[0]->parent = x;
         }
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
   }
   else
   {
      /* |y->balance| < 2: tree is still balanced, nothing to do. */
      return;
   }
   /* Reattach the rebalanced subtree w to y's former parent (or root). */
   if (w->parent != NULL)
   {
      w->parent->link[y != w->parent->link[0]] = w;
   }
   else
   {
      *root = w;
   }

   return;
}
407
/*
 * Removes node item from the AVL tree at *root and rebalances along the
 * path back to the root. Adapted from libavl's pavl_delete; the parent
 * pointers replace libavl's explicit path stack. compare is only used
 * to determine on which side of its parent the node hangs.
 */
void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
{
   PNODE p; /* Traverses tree to find node to delete. */
   PNODE q; /* Parent of |p|. */
   int dir; /* Side of |q| on which |p| is linked. */

   if (root == NULL || *root == NULL)
   {
      return ;
   }

   p = item;
   q = p->parent;
   if (q == NULL)
   {
      /* Deleting the root: treat the root pointer itself as the parent
       * (PNODE cast relies on link[0] being the first NODE member). */
      q = (PNODE) root;
      dir = 0;
   }
   else
   {
      dir = compare(p, q) > 0;
   }

   if (p->link[1] == NULL)
   {
      /* Case 1: no right child — splice in the left subtree. */
      q->link[dir] = p->link[0];
      if (q->link[dir] != NULL)
      {
         q->link[dir]->parent = p->parent;
      }
   }
   else
   {
      PNODE r = p->link[1];
      if (r->link[0] == NULL)
      {
         /* Case 2: right child has no left child — r replaces p. */
         r->link[0] = p->link[0];
         q->link[dir] = r;
         r->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         r->balance = p->balance;
         q = r;
         dir = 1;
      }
      else
      {
         /* Case 3: replace p with its in-order successor s (leftmost
          * node of the right subtree); rebalancing starts at s's old
          * parent r. */
         PNODE s = r->link[0];
         while (s->link[0] != NULL)
         {
            s = s->link[0];
         }
         r = s->parent;
         r->link[0] = s->link[1];
         s->link[0] = p->link[0];
         s->link[1] = p->link[1];
         q->link[dir] = s;
         if (s->link[0] != NULL)
         {
            s->link[0]->parent = s;
         }
         s->link[1]->parent = s;
         s->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         s->balance = p->balance;
         q = r;
         dir = 0;
      }
   }

   /* Fully detach the removed node. */
   item->link[0] = item->link[1] = item->parent = NULL;
   item->balance = 0;

   /* Walk back towards the root, updating balance factors and rotating
    * where a subtree became unbalanced; stop early when the subtree
    * height is unchanged. */
   while (q != (PNODE) root)
   {
      PNODE y = q;

      if (y->parent != NULL)
      {
         q = y->parent;
      }
      else
      {
         q = (PNODE) root;
      }

      if (dir == 0)
      {
         /* Height of y's left subtree decreased. */
         dir = q->link[0] != y;
         y->balance++;
         if (y->balance == +1)
         {
            break;
         }
         else if (y->balance == +2)
         {
            PNODE x = y->link[1];
            if (x->balance == -1)
            {
               /* Right-left double rotation. */
               PNODE w;

               ASSERT(x->balance == -1);
               w = x->link[0];
               x->link[0] = w->link[1];
               w->link[1] = x;
               y->link[1] = w->link[0];
               w->link[0] = y;
               if (w->balance == +1)
               {
                  x->balance = 0;
                  y->balance = -1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == -1| */
               {
                  x->balance = +1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[0] != NULL)
               {
                  x->link[0]->parent = x;
               }
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* Single left rotation. */
               y->link[1] = x->link[0];
               x->link[0] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  x->balance = -1;
                  y->balance = +1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
      else
      {
         /* Height of y's right subtree decreased — mirror image. */
         dir = q->link[0] != y;
         y->balance--;
         if (y->balance == -1)
         {
            break;
         }
         else if (y->balance == -2)
         {
            PNODE x = y->link[0];
            if (x->balance == +1)
            {
               /* Left-right double rotation. */
               PNODE w;
               ASSERT(x->balance == +1);
               w = x->link[1];
               x->link[1] = w->link[0];
               w->link[0] = x;
               y->link[0] = w->link[1];
               w->link[1] = y;
               if (w->balance == -1)
               {
                  x->balance = 0;
                  y->balance = +1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == +1| */
               {
                  x->balance = -1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[1] != NULL)
               {
                  x->link[1]->parent = x;
               }
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* Single right rotation. */
               y->link[0] = x->link[1];
               x->link[1] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  x->balance = +1;
                  y->balance = -1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
   }

}
647
648 PNODE _cdecl avl_get_first(PNODE root)
649 {
650 PNODE p;
651 if (root == NULL)
652 {
653 return NULL;
654 }
655 p = root;
656 while (p->link[0])
657 {
658 p = p->link[0];
659 }
660 return p;
661 }
662
663 PNODE avl_get_next(PNODE root, PNODE p)
664 {
665 PNODE q;
666 if (p->link[1])
667 {
668 p = p->link[1];
669 while(p->link[0])
670 {
671 p = p->link[0];
672 }
673 return p;
674 }
675 else
676 {
677 q = p->parent;
678 while (q && q->link[1] == p)
679 {
680 p = q;
681 q = q->parent;
682 }
683 if (q == NULL)
684 {
685 return NULL;
686 }
687 else
688 {
689 return q;
690 }
691 }
692 }
693
694 PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
695 {
696 PNODE p;
697 PNODE prev = NULL;
698 int cmp;
699
700 for (p = root; p != NULL;)
701 {
702 cmp = compare((PVOID)&size, p);
703 if (cmp < 0)
704 {
705 prev = p;
706 p = p->link[0];
707 }
708 else if (cmp > 0)
709 {
710 p = p->link[1];
711 }
712 else
713 {
714 while (p->link[0])
715 {
716 cmp = compare((PVOID)&size, p->link[0]);
717 if (cmp != 0)
718 {
719 break;
720 }
721 p = p->link[0];
722 }
723 return p;
724 }
725 }
726 return prev;
727 }
728
729 /* non paged pool functions ************************************************/
730
731 #ifdef TAG_STATISTICS_TRACKING
732 VOID
733 MiRemoveFromTagHashTable(BLOCK_HDR* block)
734 /*
735 * Remove a block from the tag hash table
736 */
737 {
738 if (block->Used.Tag == 0)
739 {
740 return;
741 }
742
743 RemoveEntryList(&block->Used.TagListEntry);
744 }
745
746 VOID
747 MiAddToTagHashTable(BLOCK_HDR* block)
748 /*
749 * Add a block to the tag hash table
750 */
751 {
752 ULONG hash;
753
754 if (block->Used.Tag == 0)
755 {
756 return;
757 }
758
759 hash = block->Used.Tag % TAG_HASH_TABLE_SIZE;
760
761 InsertHeadList(&tag_hash_table[hash], &block->Used.TagListEntry);
762 }
763 #endif /* TAG_STATISTICS_TRACKING */
764
765 #if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
766 VOID STATIC
767 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
768 {
769 CHAR c1, c2, c3, c4;
770
771 c1 = (CHAR)((CurrentTag >> 24) & 0xFF);
772 c2 = (CHAR)((CurrentTag >> 16) & 0xFF);
773 c3 = (CHAR)((CurrentTag >> 8) & 0xFF);
774 c4 = (CHAR)(CurrentTag & 0xFF);
775
776 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
777 {
778 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
779 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
780 CurrentSize, CurrentSize / CurrentNrBlocks);
781 }
782 else
783 {
784 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
785 CurrentTag, CurrentNrBlocks, CurrentSize,
786 CurrentSize / CurrentNrBlocks);
787 }
788 }
789 #endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS); */
790
/*
 * Dumps per-tag usage statistics for the nonpaged pool. For each hash
 * bucket, groups the blocks tag by tag (temporarily moving counted
 * blocks onto tmpListHead so the inner scan terminates), prints one
 * line per tag via MiDumpTagStats, then splices the blocks back into
 * the bucket. When NewOnly is TRUE, only blocks not already marked
 * Dumped are counted, and each counted block is marked.
 * NOTE(review): no pool lock is taken here, unlike
 * MiDebugDumpNonPagedPool — presumably only called from debug paths
 * where that is acceptable; confirm before relying on it.
 */
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
   ULONG i;
   BLOCK_HDR* current;
   ULONG CurrentTag;
   ULONG CurrentNrBlocks = 0;
   ULONG CurrentSize = 0;
   ULONG TotalBlocks;
   ULONG TotalSize;
   ULONG Size;
   LIST_ENTRY tmpListHead;       /* holds already-counted blocks of this bucket */
   PLIST_ENTRY current_entry;

   DbgPrint("******* Dumping non paging pool stats ******\n");
   TotalBlocks = 0;
   TotalSize = 0;
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tmpListHead);

      /* Each pass over the bucket extracts all blocks of one tag. */
      while (!IsListEmpty(&tag_hash_table[i]))
      {
         CurrentTag = 0;

         current_entry = tag_hash_table[i].Flink;
         while (current_entry != &tag_hash_table[i])
         {
            current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.TagListEntry);
            /* Advance before RemoveEntryList invalidates this entry. */
            current_entry = current_entry->Flink;
            if (CurrentTag == 0)
            {
               /* First block of this pass selects the tag to collect. */
               CurrentTag = current->Used.Tag;
               CurrentNrBlocks = 0;
               CurrentSize = 0;
            }
            if (current->Used.Tag == CurrentTag)
            {
               RemoveEntryList(&current->Used.TagListEntry);
               InsertHeadList(&tmpListHead, &current->Used.TagListEntry);
               if (!NewOnly || !current->Used.Dumped)
               {
                  CurrentNrBlocks++;
                  TotalBlocks++;
                  CurrentSize += current->Size;
                  TotalSize += current->Size;
                  current->Used.Dumped = TRUE;
               }
            }
         }
         if (CurrentTag != 0 && CurrentNrBlocks != 0)
         {
            MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
         }
      }
      /* Splice the counted blocks back into the hash bucket. */
      if (!IsListEmpty(&tmpListHead))
      {
         tag_hash_table[i].Flink = tmpListHead.Flink;
         tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
         tag_hash_table[i].Blink = tmpListHead.Blink;
         tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
      }
   }
   if (TotalBlocks != 0)
   {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
   }
   else
   {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
   }
   /* Free bytes actually backed by committed pages. */
   Size = EiFreeNonPagedPool - (MiNonPagedPoolLength - MiNonPagedPoolNrOfPages * PAGE_SIZE);
   DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
            EiNrFreeBlocks, Size, EiNrFreeBlocks ? Size / EiNrFreeBlocks : 0);
   DbgPrint("***************** Dump Complete ***************\n");
#endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
}
871
872 VOID
873 MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
874 {
875 #ifndef WHOLE_PAGE_ALLOCATIONS
876 BLOCK_HDR* current;
877 PLIST_ENTRY current_entry;
878 KIRQL oldIrql;
879
880 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
881
882 DbgPrint("******* Dumping non paging pool contents ******\n");
883 current_entry = UsedBlockListHead.Flink;
884 while (current_entry != &UsedBlockListHead)
885 {
886 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
887 if (!NewOnly || !current->Used.Dumped)
888 {
889 CHAR c1, c2, c3, c4;
890
891 c1 = (CHAR)((current->Used.Tag >> 24) & 0xFF);
892 c2 = (CHAR)((current->Used.Tag >> 16) & 0xFF);
893 c3 = (CHAR)((current->Used.Tag >> 8) & 0xFF);
894 c4 = (CHAR)(current->Used.Tag & 0xFF);
895
896 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
897 {
898 DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
899 current->Size, current->Used.Tag, c4, c3, c2, c1,
900 current->Used.Caller);
901 }
902 else
903 {
904 DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
905 current->Size, current->Used.Tag, current->Used.Caller);
906 }
907 current->Used.Dumped = TRUE;
908 }
909 current_entry = current_entry->Flink;
910 }
911 DbgPrint("***************** Dump Complete ***************\n");
912 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
913 #endif /* not WHOLE_PAGE_ALLOCATIONS */
914 }
915
916 #ifndef WHOLE_PAGE_ALLOCATIONS
917
918 #ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 * Bugchecks when a free block has a wrong magic, lies outside the pool
 * region, or when the tree contains more nodes than EiNrFreeBlocks.
 */
{
   BLOCK_HDR* current;
   unsigned int blocks_seen=0;
   PNODE p;

   p = avl_get_first(FreeBlockListRoot);

   while(p)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
      base_addr = (PVOID)current;

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* NOTE: arithmetic on PVOID is a GNU extension (byte-sized). */
      if (base_addr < MiNonPagedPoolStart ||
            base_addr + BLOCK_HDR_SIZE + current->Size > MiNonPagedPoolStart + MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  MiNonPagedPoolStart+MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      if (blocks_seen > EiNrFreeBlocks)
      {
         /* More tree nodes than the counter says — cycle or corruption. */
         DbgPrint("Too many blocks on free list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }
}
962
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * Bugchecks on wrong magic, out-of-pool blocks, a count exceeding
 * EiNrUsedBlocks, or a broken Flink/Blink linkage.
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
      base_addr = (PVOID)current;

      if (current->Magic != BLOCK_HDR_USED_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* NOTE: arithmetic on PVOID is a GNU extension (byte-sized). */
      if (base_addr < MiNonPagedPoolStart ||
            (base_addr+BLOCK_HDR_SIZE+current->Size) >
            MiNonPagedPoolStart+MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  MiNonPagedPoolStart+MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
      {
         DbgPrint("Too many blocks on used list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* Forward/backward pointers of consecutive entries must agree. */
      if (current->Used.ListEntry.Flink != &UsedBlockListHead &&
            current->Used.ListEntry.Flink->Blink != &current->Used.ListEntry)
      {
         DbgPrint("%s:%d:Break in list (current %x next %x "
                  "current->next->previous %x)\n",
                  __FILE__,__LINE__,current, current->Used.ListEntry.Flink,
                  current->Used.ListEntry.Flink->Blink);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }
}
1015
static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if duplicates are found
 * Scans both the free tree and the used list for any block whose
 * [header, header+size] range overlaps blk's range. Strict comparisons
 * deliberately let blk match itself without tripping the check.
 */
{
   char* base = (char*)blk;
   char* last = ((char*)blk) + BLOCK_HDR_SIZE + blk->Size;
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   PNODE p;

   p = avl_get_first(FreeBlockListRoot);

   while (p)
   {
      current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* current starts inside blk's range? */
      if ( (char*)current > base && (char*)current < last )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* current starts before blk but extends into it? */
      if ( (char*)current < base &&
            ((char*)current + current->Size + BLOCK_HDR_SIZE)
            > base )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);

      if ( (char*)current > base && (char*)current < last )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if ( (char*)current < base &&
            ((char*)current + current->Size + BLOCK_HDR_SIZE)
            > base )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }

}
1080
1081 static void validate_kernel_pool(void)
1082 /*
1083 * FUNCTION: Checks the integrity of the kernel memory heap
1084 */
1085 {
1086 BLOCK_HDR* current;
1087 PLIST_ENTRY current_entry;
1088 PNODE p;
1089
1090 validate_free_list();
1091 validate_used_list();
1092
1093 p = avl_get_first(FreeBlockListRoot);
1094 while (p)
1095 {
1096 current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1097 check_duplicates(current);
1098 p = avl_get_next(FreeBlockListRoot, p);
1099 }
1100 current_entry = UsedBlockListHead.Flink;
1101 while (current_entry != &UsedBlockListHead)
1102 {
1103 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
1104 check_duplicates(current);
1105 current_entry = current_entry->Flink;
1106 }
1107 }
1108 #endif
1109
#if 0
/*
 * Disabled, unfinished helper: intended to release whole pages spanned
 * by a freed block back to the system. Only the "block smaller than a
 * page" early-out was ever written; the body and the use of `i` were
 * never completed.
 */
STATIC VOID
free_pages(BLOCK_HDR* blk)
{
   ULONG start;
   ULONG end;
   ULONG i;

   start = (ULONG)blk;
   end = (ULONG)blk + BLOCK_HDR_SIZE + blk->Size;

   /*
    * If the block doesn't contain a whole page then there is nothing to do
    */
   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
   {
      return;
   }
}
#endif
1130
1131 static void remove_from_used_list(BLOCK_HDR* current)
1132 {
1133 RemoveEntryList(&current->Used.ListEntry);
1134 EiUsedNonPagedPool -= current->Size;
1135 EiNrUsedBlocks--;
1136 }
1137
1138 static void remove_from_free_list(BLOCK_HDR* current)
1139 {
1140 DPRINT("remove_from_free_list %d\n", current->Size);
1141
1142 avl_remove(&FreeBlockListRoot, &current->Free.Node, compare_node);
1143
1144 EiFreeNonPagedPool -= current->Size;
1145 EiNrFreeBlocks--;
1146 DPRINT("remove_from_free_list done\n");
1147 #ifdef DUMP_AVL
1148
1149 DumpFreeBlockTree();
1150 #endif
1151 }
1152
static void
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 * Coalesces blk with a free physical predecessor and/or successor
 * before inserting the (possibly merged) block into the AVL tree, and
 * fixes the `previous` back-pointer of the following block.
 */
{
   BLOCK_HDR* current;
   BOOL UpdatePrevPtr = FALSE;

   DPRINT("add_to_free_list (%d)\n", blk->Size);

   EiNrFreeBlocks++;

   /* Merge with the physically preceding block if it is free. */
   current = blk->previous;
   if (current && current->Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      current->Size = current->Size + BLOCK_HDR_SIZE + blk->Size;
      /* NOTE(review): the merged block is temporarily stamped USED here
       * and only becomes FREE again below — presumably so the successor
       * scan can't see a half-merged free block; confirm intent. */
      current->Magic = BLOCK_HDR_USED_MAGIC;
      memset(blk, 0xcc, BLOCK_HDR_SIZE);   /* poison the absorbed header */
      blk = current;
      UpdatePrevPtr = TRUE;
   }

   /* Merge with the physically following block if it is free. */
   current = (BLOCK_HDR*)((char*)blk + BLOCK_HDR_SIZE + blk->Size);
   if ((char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength &&
         current->Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      blk->Size += BLOCK_HDR_SIZE + current->Size;
      memset(current, 0xcc, BLOCK_HDR_SIZE);   /* poison the absorbed header */
      UpdatePrevPtr = TRUE;
      current = (BLOCK_HDR*)((char*)blk + BLOCK_HDR_SIZE + blk->Size);
   }
   /* After a merge, the next physical block's back-pointer must point
    * at the enlarged block. */
   if (UpdatePrevPtr &&
         (char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
   {
      current->previous = blk;
   }
   DPRINT("%d\n", blk->Size);
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   EiFreeNonPagedPool += blk->Size;
   avl_insert(&FreeBlockListRoot, &blk->Free.Node, compare_node);

   DPRINT("add_to_free_list done\n");
#ifdef DUMP_AVL

   DumpFreeBlockTree();
#endif
}
1203
1204 static void add_to_used_list(BLOCK_HDR* blk)
1205 /*
1206 * FUNCTION: add the block to the used list (internal)
1207 */
1208 {
1209 InsertHeadList(&UsedBlockListHead, &blk->Used.ListEntry);
1210 EiUsedNonPagedPool += blk->Size;
1211 EiNrUsedBlocks++;
1212 }
1213
1214 inline static void* block_to_address(BLOCK_HDR* blk)
1215 /*
1216 * FUNCTION: Translate a block header address to the corresponding block
1217 * address (internal)
1218 */
1219 {
1220 return ( (void *) ((char*)blk + BLOCK_HDR_SIZE));
1221 }
1222
1223 inline static BLOCK_HDR* address_to_block(void* addr)
1224 {
1225 return (BLOCK_HDR *)
1226 ( ((char*)addr) - BLOCK_HDR_SIZE );
1227 }
1228
/*
 * Ensures that every pool page from blk's payload up to `end` is backed
 * by physical memory, allocating and mapping missing pages in runs of
 * up to 32 (the Page[] batch size, matching one 32-bit word of the
 * allocation bitmap). Returns FALSE (after releasing any pages taken
 * for the current run) if allocation or mapping fails; earlier runs are
 * deliberately left mapped. On success, sets the bitmap bits and bumps
 * MiNonPagedPoolNrOfPages.
 */
static BOOLEAN
grow_block(BLOCK_HDR* blk, PVOID end)
{
   NTSTATUS Status;
   PFN_TYPE Page[32];
   ULONG_PTR StartIndex, EndIndex;
   ULONG i, j, k;

   /* First/last pool-page index that the payload [blk+hdr, end) touches. */
   StartIndex = (ULONG_PTR)(PAGE_ROUND_UP((ULONG_PTR)blk + BLOCK_HDR_SIZE - (ULONG_PTR)MiNonPagedPoolStart)) / PAGE_SIZE;
   EndIndex = ((ULONG_PTR)PAGE_ROUND_UP(end) - (ULONG_PTR)MiNonPagedPoolStart) / PAGE_SIZE;


   for (i = StartIndex; i < EndIndex; i++)
   {
      if (!(MiNonPagedPoolAllocMap[i / 32] & (1 << (i % 32))))
      {
         /* Find the extent [i, j) of consecutive unmapped pages, capped
          * at the 32-entry Page[] batch. */
         for (j = i + 1; j < EndIndex && j - i < 32; j++)
         {
            if (MiNonPagedPoolAllocMap[j / 32] & (1 << (j % 32)))
            {
               break;
            }
         }
         for (k = 0; k < j - i; k++)
         {
            Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page[k]);
            if (!NT_SUCCESS(Status))
            {
               /* Release the pages acquired so far for this run.
                * (Reusing i as the loop index is safe only because we
                * return immediately afterwards.) */
               for (i = 0; i < k; i++)
               {
                  MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
               }
               return FALSE;
            }
         }
         Status = MmCreateVirtualMapping(NULL,
                                         (PVOID)((ULONG_PTR)MiNonPagedPoolStart + i * PAGE_SIZE),
                                         PAGE_READWRITE|PAGE_SYSTEM,
                                         Page,
                                         k);
         if (!NT_SUCCESS(Status))
         {
            /* Mapping failed: give back all k pages of this run. */
            for (i = 0; i < k; i++)
            {
               MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
            }
            return FALSE;
         }
         /* Mark the freshly mapped pages in the allocation bitmap. */
         for (j = i; j < k + i; j++)
         {
            MiNonPagedPoolAllocMap[j / 32] |= (1 << (j % 32));
         }
         MiNonPagedPoolNrOfPages += k;
         i += k - 1;   /* outer i++ moves past the run */
      }
   }
   return TRUE;
}
1287
/*
 * Core allocator: finds (best-fit) a free block able to hold `size`
 * bytes at the requested `alignment` (0 = none), optionally splits off
 * the leading alignment padding and/or a trailing remainder as new free
 * blocks, commits backing pages via grow_block, and moves the block to
 * the used list. Returns the block header, or NULL when no fit exists
 * or paging in fails.
 */
static BLOCK_HDR* get_block(unsigned int size, unsigned long alignment)
{
   BLOCK_HDR *blk, *current, *previous = NULL, *next = NULL, *best = NULL;
   ULONG previous_size = 0, current_size, next_size = 0, new_size;
   PVOID end;
   PVOID addr, aligned_addr;
   PNODE p;

   DPRINT("get_block %d\n", size);

   if (alignment == 0)
   {
      /* Simple case: best fit straight from the size-ordered tree. */
      p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
      if (p)
      {
         best = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
         addr = block_to_address(best);
      }
   }
   else
   {
      /* Aligned case: walk candidates in increasing size, accounting
       * for the bytes wasted in front of the first aligned address. */
      p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);

      while(p)
      {
         current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
         addr = block_to_address(current);
         /* calculate first aligned address available within this block */
         aligned_addr = MM_ROUND_UP(addr, alignment);
         /* check to see if this address is already aligned */
         if (addr == aligned_addr)
         {
            if (current->Size >= size &&
                  (best == NULL || current->Size < best->Size))
            {
               best = current;
            }
         }
         else
         {
            /* make sure there's enough room to make a free block by the space skipped
             * from alignment. If not, calculate forward to the next alignment
             * and see if we allocate there...
             */
            new_size = (ULONG_PTR)aligned_addr - (ULONG_PTR)addr + size;
            if ((ULONG_PTR)aligned_addr - (ULONG_PTR)addr < BLOCK_HDR_SIZE)
            {
               /* not enough room for a free block header, add some more bytes */
               aligned_addr = MM_ROUND_UP(block_to_address((BLOCK_HDR*)((char*)current + BLOCK_HDR_SIZE)), alignment);
               new_size = (ULONG_PTR)aligned_addr - (ULONG_PTR)addr + size;
            }
            if (current->Size >= new_size &&
                  (best == NULL || current->Size < best->Size))
            {
               best = current;
            }
         }
         /* A block this roomy always fits the aligned request — no need
          * to keep searching for a tighter fit. */
         if (best && current->Size >= size + alignment + 2 * BLOCK_HDR_SIZE)
         {
            break;
         }
         p = avl_get_next(FreeBlockListRoot, p);

      }
   }

   /*
    * We didn't find anything suitable at all.
    */
   if (best == NULL)
   {
      return NULL;
   }

   current = best;
   current_size = current->Size;

   if (alignment > 0)
   {
      /* Recompute the aligned position inside the chosen block and, if
       * padding is needed, plan to split it off as block `previous`. */
      addr = block_to_address(current);
      aligned_addr = MM_ROUND_UP(addr, alignment);
      if (addr != aligned_addr)
      {
         blk = address_to_block(aligned_addr);
         if ((char*)blk < (char*)current + BLOCK_HDR_SIZE)
         {
            aligned_addr = MM_ROUND_UP(block_to_address((BLOCK_HDR*)((char*)current + BLOCK_HDR_SIZE)), alignment);
            blk = address_to_block(aligned_addr);
         }
         /*
          * if size-aligned, break off the preceding bytes into their own block...
          */
         previous = current;
         previous_size = (ULONG_PTR)blk - (ULONG_PTR)previous - BLOCK_HDR_SIZE;
         current = blk;
         current_size -= ((ULONG_PTR)current - (ULONG_PTR)previous);
      }
   }

   end = (char*)current + BLOCK_HDR_SIZE + size;

   if (current_size >= size + BLOCK_HDR_SIZE + MM_POOL_ALIGNMENT)
   {
      /* create a new free block after our block, if the memory size is >= 4 byte for this block */
      next = (BLOCK_HDR*)((ULONG_PTR)current + size + BLOCK_HDR_SIZE);
      next_size = current_size - size - BLOCK_HDR_SIZE;
      current_size = size;
      end = (char*)next + BLOCK_HDR_SIZE;
   }

   if (previous)
   {
      /* Split off the alignment padding: `previous` stays free (shrunk),
       * `current` becomes the used block. */
      remove_from_free_list(previous);
      if (!grow_block(previous, end))
      {
         add_to_free_list(previous);
         return NULL;
      }
      memset(current, 0, BLOCK_HDR_SIZE);
      current->Size = current_size;
      current->Magic = BLOCK_HDR_USED_MAGIC;
      current->previous = previous;
      previous->Size = previous_size;
      if (next == NULL)
      {
         /* Fix the back-pointer of the physically following block. */
         blk = (BLOCK_HDR*)((char*)current + BLOCK_HDR_SIZE + current->Size);
         if ((char*)blk < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
         {
            blk->previous = current;
         }
      }

      add_to_free_list(previous);
   }
   else
   {
      remove_from_free_list(current);

      if (!grow_block(current, end))
      {
         add_to_free_list(current);
         return NULL;
      }

      current->Magic = BLOCK_HDR_USED_MAGIC;
      if (next)
      {
         current->Size = current_size;
      }
   }

   if (next)
   {
      /* Publish the trailing remainder as a new free block. */
      memset(next, 0, BLOCK_HDR_SIZE);

      next->Size = next_size;
      next->Magic = BLOCK_HDR_FREE_MAGIC;
      next->previous = current;
      blk = (BLOCK_HDR*)((char*)next + BLOCK_HDR_SIZE + next->Size);
      if ((char*)blk < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
      {
         blk->previous = next;
      }
      add_to_free_list(next);
   }

   add_to_used_list(current);
   VALIDATE_POOL;
   return current;
}
1458
1459 #endif /* not WHOLE_PAGE_ALLOCATIONS */
1460
1461 VOID STDCALL ExFreeNonPagedPool (PVOID block)
1462 /*
1463 * FUNCTION: Releases previously allocated memory
1464 * ARGUMENTS:
1465 * block = block to free
1466 */
1467 {
1468 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
1469 KIRQL oldIrql;
1470
1471 if (block == NULL)
1472 {
1473 return;
1474 }
1475
1476 DPRINT("freeing block %x\n",block);
1477
1478 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
1479 ((PULONG)&block)[-1]);
1480
1481 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1482
1483 ExFreeWholePageBlock(block);
1484 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1485
1486 #else /* not WHOLE_PAGE_ALLOCATIONS */
1487
1488 BLOCK_HDR* blk=address_to_block(block);
1489 KIRQL oldIrql;
1490
1491 if (block == NULL)
1492 {
1493 return;
1494 }
1495
1496 DPRINT("freeing block %x\n",blk);
1497
1498 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->Size,
1499 ((PULONG)&block)[-1]);
1500
1501 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1502
1503 VALIDATE_POOL;
1504
1505 if (blk->Magic != BLOCK_HDR_USED_MAGIC)
1506 {
1507 if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
1508 {
1509 DbgPrint("ExFreePool of already freed address %x\n", block);
1510 }
1511 else
1512 {
1513 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1514 block, blk->Magic);
1515 }
1516 KEBUGCHECK(0);
1517 return;
1518 }
1519
1520 memset(block, 0xcc, blk->Size);
1521 #ifdef TAG_STATISTICS_TRACKING
1522
1523 MiRemoveFromTagHashTable(blk);
1524 #endif
1525
1526 remove_from_used_list(blk);
1527 blk->Magic = BLOCK_HDR_FREE_MAGIC;
1528 add_to_free_list(blk);
1529 VALIDATE_POOL;
1530 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1531
1532 #endif /* WHOLE_PAGE_ALLOCATIONS */
1533 }
1534
/*
 * Allocate 'Size' bytes from the non paged pool, recording 'Tag' and the
 * caller's address for pool-usage statistics.  Returns the usable data
 * address, or NULL when Size is 0 or no suitable block can be found.
 */
PVOID STDCALL
ExAllocateNonPagedPoolWithTag(POOL_TYPE Type, ULONG Size, ULONG Tag, PVOID Caller)
{
#ifdef WHOLE_PAGE_ALLOCATIONS
   PVOID block;
   KIRQL oldIrql;

   ASSERT_IRQL(DISPATCH_LEVEL);

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size,Caller);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   /*
    * Accommodate the useful idiom of requesting a zero-byte allocation:
    * treat it as a NULL result rather than an error.
    */
   if (Size == 0)
   {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
   }

   block = ExAllocateWholePageBlock(Size);
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   if (NULL == block)
   {
      DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
              Size );
   }
   return(block);

#else /* not WHOLE_PAGE_ALLOCATIONS */

   PVOID block;
   BLOCK_HDR* best = NULL;
   KIRQL oldIrql;
   ULONG alignment;

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size,Caller);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   VALIDATE_POOL;

#if 0
   /* after some allocations print the npaged pool stats */
#ifdef TAG_STATISTICS_TRACKING

   {
      static ULONG counter = 0;
      if (counter++ % 100000 == 0)
      {
         MiDebugDumpNonPagedPoolStats(FALSE);
      }
   }
#endif
#endif
   /*
    * Accommodate the useful idiom of requesting a zero-byte allocation:
    * treat it as a NULL result rather than an error.
    */
   if (Size == 0)
   {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
   }
   /* Make the size dword aligned, this makes the block dword aligned */
   Size = ROUND_UP(Size, MM_POOL_ALIGNMENT);

   /* Choose the block alignment: page-sized (or larger) requests are
    * page aligned, cache-aligned pool types get cache-line alignment,
    * everything else only gets the implicit MM_POOL_ALIGNMENT above. */
   if (Size >= PAGE_SIZE)
   {
      alignment = PAGE_SIZE;
   }
   else if (Type == NonPagedPoolCacheAligned ||
            Type == NonPagedPoolCacheAlignedMustS)
   {
      alignment = MM_CACHE_LINE_SIZE;
   }
   else
   {
      alignment = 0;
   }

   best = get_block(Size, alignment);
   if (best == NULL)
   {
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
              Size );
      return NULL;
   }
   /* Record the tag and caller in the (now used) block header for the
    * pool statistics / leak-tracking machinery. */
   best->Used.Tag = Tag;
   best->Used.Caller = Caller;
   best->Used.Dumped = FALSE;
   best->Used.TagListEntry.Flink = best->Used.TagListEntry.Blink = NULL;
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(best);
#endif
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   block = block_to_address(best);
   /*   RtlZeroMemory(block, Size);*/
   return(block);
#endif /* WHOLE_PAGE_ALLOCATIONS */
}
1644
1645 #ifdef WHOLE_PAGE_ALLOCATIONS
1646
/*
 * Allocate 'Size' bytes backed by whole pages from the non paged pool
 * area.  The returned address is placed at the END of the mapped pages
 * so that an overrun immediately past the allocation touches the
 * following unmapped page and faults.
 */
PVOID STDCALL
ExAllocateWholePageBlock(ULONG Size)
{
   PVOID Address;
   PFN_TYPE Page;
   ULONG i;
   ULONG NrPages;
   ULONG Base;

   /* Number of full pages needed to hold Size bytes. */
   NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;

   /* Reserve NrPages + 1 bits in the allocation bitmap; the extra bit is
    * left unmapped below, presumably serving as a guard page after the
    * allocation -- TODO confirm. */
   Base = RtlFindClearBitsAndSet(&NonPagedPoolAllocMap, NrPages + 1, NonPagedPoolAllocMapHint);
   if (Base == 0xffffffff)
   {
      DbgPrint("Out of non paged pool space.\n");
      KEBUGCHECK(0);
   }
   if (NonPagedPoolAllocMapHint == Base)
   {
      NonPagedPoolAllocMapHint += (NrPages + 1);
   }

   Address = MiNonPagedPoolStart + Base * PAGE_SIZE;

   /* Map only NrPages pages; the bit reserved past them stays unmapped. */
   for (i = 0; i < NrPages; i++)
   {
      Page = MmAllocPage(MC_NPPOOL, 0);
      if (Page == 0)
      {
         KEBUGCHECK(0);
      }
      MmCreateVirtualMapping(NULL,
                             Address + (i * PAGE_SIZE),
                             PAGE_READWRITE | PAGE_SYSTEM,
                             &Page,
                             TRUE);
   }

   MiCurrentNonPagedPoolLength = max(MiCurrentNonPagedPoolLength, (Base + NrPages) * PAGE_SIZE);
   /* Round the request up to 8 bytes and push the returned pointer to the
    * tail of the mapped region, so overruns hit the unmapped page. */
   Size = (Size + 7) & ~7;
   Address = ((PVOID)((PUCHAR)Address + (NrPages * PAGE_SIZE) - Size));

   DPRINT("WPALLOC: %x (%d)\n", Address, Size);

   return Address;
}
1693
/*
 * Free a block previously returned by ExAllocateWholePageBlock.  'Addr'
 * may point into the middle of the first page (the allocator returns a
 * tail-aligned pointer); every present page from that point onward is
 * unmapped and its bitmap bit cleared.
 *
 * NOTE(review): the allocator sets NrPages + 1 bits, but this loop only
 * clears bits for pages that are actually mapped -- the trailing
 * (guard) bit appears never to be cleared, which would slowly leak
 * bitmap space.  Verify against the allocation path before changing.
 */
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr)
{
   ULONG Base;

   if (Addr < MiNonPagedPoolStart ||
         Addr >= (MiNonPagedPoolStart + MiCurrentNonPagedPoolLength))
   {
      DbgPrint("Block %x found outside pool area\n", Addr);
      KEBUGCHECK(0);
   }
   Base = (Addr - MiNonPagedPoolStart) / PAGE_SIZE;
   /* Let the next bitmap search start no later than this freed range. */
   NonPagedPoolAllocMapHint = min(NonPagedPoolAllocMapHint, Base);
   /* Walk forward while pages are mapped: the unmapped guard page that
    * follows the allocation terminates the loop. */
   while (MmIsPagePresent(NULL, Addr))
   {
      MmDeleteVirtualMapping(NULL,
                             Addr,
                             TRUE,
                             NULL,
                             NULL);
      RtlClearBits(&NonPagedPoolAllocMap, Base, 1);
      Base++;
      Addr += PAGE_SIZE;
   }
}
1719
1720 #endif /* WHOLE_PAGE_ALLOCATIONS */
1721
1722 /* Whole Page Allocations note:
1723 *
1724 * We need enough pages for these things:
1725 *
1726 * 1) bitmap buffer
1727 * 2) hdr
1728 * 3) actual pages
1729 *
1730 */
/*
 * One-time initialization of the non paged pool: sets up the pool lock,
 * the used/free block bookkeeping, and the page allocation bitmap.
 * With WHOLE_PAGE_ALLOCATIONS the bitmap itself is mapped at the start
 * of the pool area; otherwise the bitmap lives inside the first (used)
 * block header and the remainder becomes one large free block.
 */
VOID INIT_FUNCTION
MiInitializeNonPagedPool(VOID)
{
   NTSTATUS Status;
   PFN_TYPE Page;
   ULONG i;
   PVOID Address;
#ifdef WHOLE_PAGE_ALLOCATIONS
#else

   BLOCK_HDR* blk;
#endif
#ifdef TAG_STATISTICS_TRACKING

   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tag_hash_table[i]);
   }
#endif
   KeInitializeSpinLock(&MmNpoolLock);
   InitializeListHead(&UsedBlockListHead);
   InitializeListHead(&AddressListHead);
   FreeBlockListRoot = NULL;
#ifdef WHOLE_PAGE_ALLOCATIONS

   NonPagedPoolAllocMapHint = PAGE_ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE / 8) / PAGE_SIZE; /* Pages of bitmap buffer */
   MiCurrentNonPagedPoolLength = NonPagedPoolAllocMapHint * PAGE_SIZE;
   Address = MiNonPagedPoolStart;
   /* Map physical pages for the bitmap buffer at the start of the pool. */
   for (i = 0; i < NonPagedPoolAllocMapHint; i++)
   {
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to allocate a page\n");
         KEBUGCHECK(0);
      }
      Status = MmCreateVirtualMapping(NULL,
                                      Address,
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      &Page,
                                      1);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
      Address += PAGE_SIZE;
   }
   /* Mark the bitmap's own pages as allocated so they are never handed out. */
   RtlInitializeBitMap(&NonPagedPoolAllocMap, MiNonPagedPoolStart,
                       MiNonPagedPoolLength / PAGE_SIZE);
   RtlClearAllBits(&NonPagedPoolAllocMap);
   RtlSetBits(&NonPagedPoolAllocMap, 0, NonPagedPoolAllocMapHint);
#else

   /* The allocation bitmap is stored as the payload of the first block. */
   MiNonPagedPoolAllocMap = block_to_address((BLOCK_HDR*)MiNonPagedPoolStart);
   MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + 2 * BLOCK_HDR_SIZE);
   MiNonPagedPoolNrOfPages /= PAGE_SIZE;
   Address = MiNonPagedPoolStart;
   /* Map the initial pages (bitmap + two block headers) and record them
    * as present in the bitmap, one bit per page. */
   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to allocate a page\n");
         KEBUGCHECK(0);
      }
      Status = MmCreateVirtualMapping(NULL,
                                      Address,
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      &Page,
                                      1);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
      MiNonPagedPoolAllocMap[i / 32] |= (1 << (i % 32));
      Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
   }
   /* the first block contains the non paged pool bitmap */
   blk = (BLOCK_HDR*)MiNonPagedPoolStart;
   memset(blk, 0, BLOCK_HDR_SIZE);
   blk->Magic = BLOCK_HDR_USED_MAGIC;
   blk->Size = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8;
   blk->previous = NULL;
   blk->Used.Tag = 0xffffffff;
   blk->Used.Caller = 0;
   blk->Used.Dumped = FALSE;
   add_to_used_list(blk);
#ifdef TAG_STATISTICS_TRACKING

   MiAddToTagHashTable(blk);
#endif
   /* the second block is the first free block */
   blk = (BLOCK_HDR*)((char*)blk + BLOCK_HDR_SIZE + blk->Size);
   memset(blk, 0, BLOCK_HDR_SIZE);
   /* Poison the mapped part of the free area with 0xcc to catch use of
    * uninitialized pool memory. */
   memset((char*)blk + BLOCK_HDR_SIZE, 0x0cc, MiNonPagedPoolNrOfPages * PAGE_SIZE - ((ULONG_PTR)blk + BLOCK_HDR_SIZE - (ULONG_PTR)MiNonPagedPoolStart));
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   /* The free block spans the whole remaining pool, including the not
    * yet mapped part; grow_block maps pages in on demand. */
   blk->Size = MiNonPagedPoolLength - ((ULONG_PTR)blk + BLOCK_HDR_SIZE - (ULONG_PTR)MiNonPagedPoolStart);
   blk->previous = (BLOCK_HDR*)MiNonPagedPoolStart;
   add_to_free_list(blk);
#endif
}
1834
1835 PVOID
1836 STDCALL
1837 MiAllocateSpecialPool (IN POOL_TYPE PoolType,
1838 IN SIZE_T NumberOfBytes,
1839 IN ULONG Tag,
1840 IN ULONG Underrun
1841 )
1842 {
1843 /* FIXME: Special Pools not Supported */
1844 DbgPrint("Special Pools not supported\n");
1845 return 0;
1846 }
1847
1848 /* EOF */