2bccf77fb4e764d83995091d30b20dc98506ed41
[reactos.git] / reactos / ntoskrnl / mm / npool.c
1 /* $Id: npool.c,v 1.91 2004/10/01 20:51:28 arty Exp $
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
8 * UPDATE HISTORY:
9 * 27/05/98: Created
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
13 * in ExFreePool
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
15 */
16
17 /* INCLUDES ****************************************************************/
18
19 #include <ntoskrnl.h>
20 #define NDEBUG
21 #include <internal/debug.h>
22
23 /* Enable strict checking of the nonpaged pool on every allocation */
24 /*#define ENABLE_VALIDATE_POOL*/
25
26 /* Enable tracking of statistics about the tagged blocks in the pool */
27 /*#define TAG_STATISTICS_TRACKING*/
28
29 /*
30 * Put each block in its own range of pages and position the block at the
31 * end of the range so any accesses beyond the end of block are to invalid
32 * memory locations.
33 */
34 /*#define WHOLE_PAGE_ALLOCATIONS*/
35
36 #ifdef ENABLE_VALIDATE_POOL
37 #define VALIDATE_POOL validate_kernel_pool()
38 #else
39 #define VALIDATE_POOL
40 #endif
41
42 #if 0
43 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
44 #else
45 #if defined(__GNUC__)
46 #define POOL_TRACE(args...)
47 #else
48 #define POOL_TRACE
49 #endif /* __GNUC__ */
50 #endif
51
52 /* avl types ****************************************************************/
53
/* FIXME:
 * These declarations should be moved into a separate header file.
 */
57
/* A node of the AVL tree used to index the free pool blocks by size. */
typedef struct _NODE
{
   struct _NODE* link[2];   /* link[0] = left child, link[1] = right child */
   struct _NODE* parent;    /* NULL for the root node */
   signed char balance;     /* AVL balance factor (transiently +/-2 while rebalancing) */
}
NODE, *PNODE;
65
66 /* TYPES *******************************************************************/
67
68 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
69 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
70
71 /*
72 * fields present at the start of a block (this is for internal use only)
73 */
/*
 * fields present at the start of a block (this is for internal use only)
 */
typedef struct _BLOCK_HDR
{
   ULONG Magic;                 /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
   ULONG Size;                  /* payload size in bytes, header excluded */
   struct _BLOCK_HDR* previous; /* physically preceding block in the pool, NULL for the first */
   union
   {
      /* Valid while the block is allocated. */
      struct
      {
         LIST_ENTRY ListEntry;    /* link in UsedBlockListHead */
         ULONG Tag;               /* pool tag packed into a ULONG; 0 = untagged */
         PVOID Caller;            /* allocator address, printed by the debug dumpers */
         LIST_ENTRY TagListEntry; /* link in the per-tag hash bucket (tag_hash_table) */
         BOOLEAN Dumped;          /* set once the dump routines have printed this block */
      }
      Used;
      /* Valid while the block is free. */
      struct
      {
         NODE Node;               /* link in the free-block AVL tree (FreeBlockListRoot) */
      }
      Free;
   };
}
BLOCK_HDR;
98
/* Size of the header prepended to every block, rounded up so the payload
 * that follows it keeps the pool alignment. */
#define BLOCK_HDR_SIZE ROUND_UP(sizeof(BLOCK_HDR), MM_POOL_ALIGNMENT)

/* Whole-page allocation entry points (defined later in this file). */
PVOID STDCALL
ExAllocateWholePageBlock(ULONG Size);
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr);

/* GLOBALS *****************************************************************/

/* Base address and byte length of the nonpaged pool virtual region. */
extern PVOID MiNonPagedPoolStart;
extern ULONG MiNonPagedPoolLength;

/*
 * Root of the AVL tree of free blocks, keyed by size with ties broken
 * by address (see compare_node)
 */
static PNODE FreeBlockListRoot = NULL;

/*
 * Head of the list of in-use blocks
 */
static LIST_ENTRY UsedBlockListHead;

/* NOTE(review): not referenced in this part of the file — presumably used
 * by the whole-page allocator further down; confirm before removing. */
static LIST_ENTRY AddressListHead;

#ifndef WHOLE_PAGE_ALLOCATIONS
/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;
#endif

/*
 * Lock that protects the non-paged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/* Total quota for Non Paged Pool */
ULONG MmTotalNonPagedPoolQuota = 0;

/*
 * Allocate a range of memory in the nonpaged pool
 */
PVOID
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

VOID
MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
/* Used blocks bucketed by tag; see MiAddToTagHashTable. */
static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */

#ifdef WHOLE_PAGE_ALLOCATIONS
static RTL_BITMAP NonPagedPoolAllocMap;
static ULONG NonPagedPoolAllocMapHint;
static ULONG MiCurrentNonPagedPoolLength = 0;
#else
/* One bit per pool page: set when the page is committed (see grow_block). */
static PULONG MiNonPagedPoolAllocMap;
static ULONG MiNonPagedPoolNrOfPages;
#endif /* WHOLE_PAGE_ALLOCATIONS */
175
176 /* avl helper functions ****************************************************/
177
178 void DumpFreeBlockNode(PNODE p)
179 {
180 static int count = 0;
181 BLOCK_HDR* blk;
182
183 count++;
184
185 if (p)
186 {
187 DumpFreeBlockNode(p->link[0]);
188 blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
189 DbgPrint("%08x %8d (%d)\n", blk, blk->Size, count);
190 DumpFreeBlockNode(p->link[1]);
191 }
192
193 count--;
194 }
195 void DumpFreeBlockTree(void)
196 {
197 DbgPrint("--- Begin tree ------------------\n");
198 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, BLOCK_HDR, Free.Node));
199 DumpFreeBlockNode(FreeBlockListRoot);
200 DbgPrint("--- End tree --------------------\n");
201 }
202
203 int compare_node(PNODE p1, PNODE p2)
204 {
205 BLOCK_HDR* blk1 = CONTAINING_RECORD(p1, BLOCK_HDR, Free.Node);
206 BLOCK_HDR* blk2 = CONTAINING_RECORD(p2, BLOCK_HDR, Free.Node);
207
208 if (blk1->Size == blk2->Size)
209 {
210 if (blk1 < blk2)
211 {
212 return -1;
213 }
214 if (blk1 > blk2)
215 {
216 return 1;
217 }
218 }
219 else
220 {
221 if (blk1->Size < blk2->Size)
222 {
223 return -1;
224 }
225 if (blk1->Size > blk2->Size)
226 {
227 return 1;
228 }
229 }
230 return 0;
231
232 }
233
234 int compare_value(PVOID value, PNODE p)
235 {
236 BLOCK_HDR* blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
237 ULONG v = *(PULONG)value;
238
239 if (v < blk->Size)
240 {
241 return -1;
242 }
243 if (v > blk->Size)
244 {
245 return 1;
246 }
247 return 0;
248 }
249
250 /* avl functions **********************************************************/
251
252 /* FIXME:
253 * The avl functions should be moved into a separate file.
254 */
255
256 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
257
/*
 * FUNCTION: Insert node n into the AVL tree at *root and rebalance.
 * ARGUMENTS:
 *   root    = address of the root pointer (updated when the root changes)
 *   n       = node to insert; its links and balance are initialized here
 *   compare = total ordering over nodes (see compare_node)
 * NOTE: Based on libavl. At most one single or double rotation is needed
 *       per insertion, performed around y, the deepest unbalanced ancestor.
 */
void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
{
   PNODE y;     /* Top node to update balance factor, and parent. */
   PNODE p, q;  /* Iterator, and parent. */
   PNODE w;     /* New root of rebalanced subtree. */
   int dir = 0; /* Direction to descend. */

   n->link[0] = n->link[1] = n->parent = NULL;
   n->balance = 0;

   /* Descend to the insertion point; y tracks the deepest node with a
    * non-zero balance factor — the only place a rotation can be needed. */
   y = *root;
   for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
   {
      dir = compare(n, p) > 0;
      if (p->balance != 0)
      {
         y = p;
      }
   }

   /* Attach n below q, or make it the root of an empty tree. */
   n->parent = q;
   if (q != NULL)
   {
      q->link[dir] = n;
   }
   else
   {
      *root = n;
   }

   /* A previously empty tree needs no rebalancing. */
   if (*root == n)
   {
      return;
   }

   /* Update balance factors on the path from n up to (excluding) y. */
   for (p = n; p != y; p = q)
   {
      q = p->parent;
      dir = q->link[0] != p;
      if (dir == 0)
      {
         q->balance--;
      }
      else
      {
         q->balance++;
      }
   }

   if (y->balance == -2)
   {
      /* Left subtree grew too tall. */
      PNODE x = y->link[0];
      if (x->balance == -1)
      {
         /* Left-left case: single right rotation about y. */
         w = x;
         y->link[0] = x->link[1];
         x->link[1] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
      else
      {
         /* Left-right case: double rotation, w becomes the subtree root. */
         assert (x->balance == +1);
         w = x->link[1];
         x->link[1] = w->link[0];
         w->link[0] = x;
         y->link[0] = w->link[1];
         w->link[1] = y;
         if (w->balance == -1)
         {
            x->balance = 0;
            y->balance = +1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == +1| */
         {
            x->balance = -1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[1] != NULL)
         {
            x->link[1]->parent = x;
         }
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
   }
   else if (y->balance == +2)
   {
      /* Right subtree grew too tall (mirror of the cases above). */
      PNODE x = y->link[1];
      if (x->balance == +1)
      {
         /* Right-right case: single left rotation about y. */
         w = x;
         y->link[1] = x->link[0];
         x->link[0] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
      else
      {
         /* Right-left case: double rotation, w becomes the subtree root. */
         assert (x->balance == -1);
         w = x->link[0];
         x->link[0] = w->link[1];
         w->link[1] = x;
         y->link[1] = w->link[0];
         w->link[0] = y;
         if (w->balance == 1)
         {
            x->balance = 0;
            y->balance = -1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == -1| */
         {
            x->balance = +1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[0] != NULL)
         {
            x->link[0]->parent = x;
         }
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
   }
   else
   {
      /* Tree still balanced; no rotation performed. */
      return;
   }
   /* Reattach the rotated subtree to w's parent (or make w the root). */
   if (w->parent != NULL)
   {
      w->parent->link[y != w->parent->link[0]] = w;
   }
   else
   {
      *root = w;
   }

   return;
}
424
/*
 * FUNCTION: Remove node 'item' from the AVL tree at *root and rebalance.
 * ARGUMENTS:
 *   root    = address of the root pointer (updated when the root changes)
 *   item    = node to remove; it must currently be in the tree
 *   compare = the same ordering predicate used for insertion
 * NOTE: Based on libavl. Unlike insertion, deletion may require rotations
 *       at several levels, so the rebalancing loop walks up to the root.
 */
void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
{
   PNODE p; /* Traverses tree to find node to delete. */
   PNODE q; /* Parent of |p|. */
   int dir; /* Side of |q| on which |p| is linked. */

   if (root == NULL || *root == NULL)
   {
      return ;
   }

   p = item;
   q = p->parent;
   if (q == NULL)
   {
      /* Deleting the root: treat the root pointer itself as the parent
       * ("pseudo-node" trick — *root aliases link[0] of (PNODE)root). */
      q = (PNODE) root;
      dir = 0;
   }
   else
   {
      dir = compare(p, q) > 0;
   }

   if (p->link[1] == NULL)
   {
      /* Case 1: no right child — splice the left subtree into p's place. */
      q->link[dir] = p->link[0];
      if (q->link[dir] != NULL)
      {
         q->link[dir]->parent = p->parent;
      }
   }
   else
   {
      PNODE r = p->link[1];
      if (r->link[0] == NULL)
      {
         /* Case 2: right child has no left child — r replaces p directly. */
         r->link[0] = p->link[0];
         q->link[dir] = r;
         r->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         r->balance = p->balance;
         q = r;
         dir = 1;
      }
      else
      {
         /* Case 3: replace p with its in-order successor s (leftmost node
          * of the right subtree); rebalancing starts at s's old parent r. */
         PNODE s = r->link[0];
         while (s->link[0] != NULL)
         {
            s = s->link[0];
         }
         r = s->parent;
         r->link[0] = s->link[1];
         s->link[0] = p->link[0];
         s->link[1] = p->link[1];
         q->link[dir] = s;
         if (s->link[0] != NULL)
         {
            s->link[0]->parent = s;
         }
         s->link[1]->parent = s;
         s->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         s->balance = p->balance;
         q = r;
         dir = 0;
      }
   }

   /* Detach the removed node completely. */
   item->link[0] = item->link[1] = item->parent = NULL;
   item->balance = 0;

   /* Walk back up from q, fixing balance factors and rotating where a
    * subtree has become two levels out of balance. */
   while (q != (PNODE) root)
   {
      PNODE y = q;

      if (y->parent != NULL)
      {
         q = y->parent;
      }
      else
      {
         q = (PNODE) root;
      }

      if (dir == 0)
      {
         /* The left subtree of y shrank. */
         dir = q->link[0] != y;
         y->balance++;
         if (y->balance == +1)
         {
            /* Height unchanged overall; done. */
            break;
         }
         else if (y->balance == +2)
         {
            PNODE x = y->link[1];
            if (x->balance == -1)
            {
               /* Right-left case: double rotation. */
               PNODE w;

               assert (x->balance == -1);
               w = x->link[0];
               x->link[0] = w->link[1];
               w->link[1] = x;
               y->link[1] = w->link[0];
               w->link[0] = y;
               if (w->balance == +1)
               {
                  x->balance = 0;
                  y->balance = -1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == -1| */
               {
                  x->balance = +1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[0] != NULL)
               {
                  x->link[0]->parent = x;
               }
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* Right-right case: single left rotation. */
               y->link[1] = x->link[0];
               x->link[0] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  /* Subtree height unchanged; rebalancing stops here. */
                  x->balance = -1;
                  y->balance = +1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
      else
      {
         /* The right subtree of y shrank (mirror of the branch above). */
         dir = q->link[0] != y;
         y->balance--;
         if (y->balance == -1)
         {
            break;
         }
         else if (y->balance == -2)
         {
            PNODE x = y->link[0];
            if (x->balance == +1)
            {
               /* Left-right case: double rotation. */
               PNODE w;
               assert (x->balance == +1);
               w = x->link[1];
               x->link[1] = w->link[0];
               w->link[0] = x;
               y->link[0] = w->link[1];
               w->link[1] = y;
               if (w->balance == -1)
               {
                  x->balance = 0;
                  y->balance = +1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == +1| */
               {
                  x->balance = -1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[1] != NULL)
               {
                  x->link[1]->parent = x;
               }
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* Left-left case: single right rotation. */
               y->link[0] = x->link[1];
               x->link[1] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  x->balance = +1;
                  y->balance = -1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
   }

}
664
665 PNODE _cdecl avl_get_first(PNODE root)
666 {
667 PNODE p;
668 if (root == NULL)
669 {
670 return NULL;
671 }
672 p = root;
673 while (p->link[0])
674 {
675 p = p->link[0];
676 }
677 return p;
678 }
679
680 PNODE avl_get_next(PNODE root, PNODE p)
681 {
682 PNODE q;
683 if (p->link[1])
684 {
685 p = p->link[1];
686 while(p->link[0])
687 {
688 p = p->link[0];
689 }
690 return p;
691 }
692 else
693 {
694 q = p->parent;
695 while (q && q->link[1] == p)
696 {
697 p = q;
698 q = q->parent;
699 }
700 if (q == NULL)
701 {
702 return NULL;
703 }
704 else
705 {
706 return q;
707 }
708 }
709 }
710
711 PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
712 {
713 PNODE p;
714 PNODE prev = NULL;
715 int cmp;
716
717 for (p = root; p != NULL;)
718 {
719 cmp = compare((PVOID)&size, p);
720 if (cmp < 0)
721 {
722 prev = p;
723 p = p->link[0];
724 }
725 else if (cmp > 0)
726 {
727 p = p->link[1];
728 }
729 else
730 {
731 while (p->link[0])
732 {
733 cmp = compare((PVOID)&size, p->link[0]);
734 if (cmp != 0)
735 {
736 break;
737 }
738 p = p->link[0];
739 }
740 return p;
741 }
742 }
743 return prev;
744 }
745
746 /* non paged pool functions ************************************************/
747
748 #ifdef TAG_STATISTICS_TRACKING
749 VOID
750 MiRemoveFromTagHashTable(BLOCK_HDR* block)
751 /*
752 * Remove a block from the tag hash table
753 */
754 {
755 if (block->Used.Tag == 0)
756 {
757 return;
758 }
759
760 RemoveEntryList(&block->Used.TagListEntry);
761 }
762
763 VOID
764 MiAddToTagHashTable(BLOCK_HDR* block)
765 /*
766 * Add a block to the tag hash table
767 */
768 {
769 ULONG hash;
770
771 if (block->Used.Tag == 0)
772 {
773 return;
774 }
775
776 hash = block->Used.Tag % TAG_HASH_TABLE_SIZE;
777
778 InsertHeadList(&tag_hash_table[hash], &block->Used.TagListEntry);
779 }
780 #endif /* TAG_STATISTICS_TRACKING */
781
782 #if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
783 VOID STATIC
784 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
785 {
786 CHAR c1, c2, c3, c4;
787
788 c1 = (CHAR)((CurrentTag >> 24) & 0xFF);
789 c2 = (CHAR)((CurrentTag >> 16) & 0xFF);
790 c3 = (CHAR)((CurrentTag >> 8) & 0xFF);
791 c4 = (CHAR)(CurrentTag & 0xFF);
792
793 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
794 {
795 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
796 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
797 CurrentSize, CurrentSize / CurrentNrBlocks);
798 }
799 else
800 {
801 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
802 CurrentTag, CurrentNrBlocks, CurrentSize,
803 CurrentSize / CurrentNrBlocks);
804 }
805 }
806 #endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS); */
807
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
/*
 * FUNCTION: Print per-tag usage statistics for the nonpaged pool.
 * ARGUMENTS:
 *   NewOnly = if TRUE, only blocks not yet marked Dumped are counted
 *             (each counted block is then marked Dumped).
 * NOTE(review): no locking here, unlike MiDebugDumpNonPagedPool — the
 * hash table is spliced in place, so concurrent pool activity during a
 * dump would corrupt it; presumably only called from a stopped system.
 */
{
#if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
   ULONG i;
   BLOCK_HDR* current;
   ULONG CurrentTag;
   ULONG CurrentNrBlocks = 0;
   ULONG CurrentSize = 0;
   ULONG TotalBlocks;
   ULONG TotalSize;
   ULONG Size;
   LIST_ENTRY tmpListHead;    /* already-counted blocks are parked here */
   PLIST_ENTRY current_entry;

   DbgPrint("******* Dumping non paging pool stats ******\n");
   TotalBlocks = 0;
   TotalSize = 0;
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tmpListHead);

      /* Each pass over the bucket pulls out every block sharing one tag
       * and prints that tag's totals; repeat until the bucket is empty. */
      while (!IsListEmpty(&tag_hash_table[i]))
      {
         CurrentTag = 0;

         current_entry = tag_hash_table[i].Flink;
         while (current_entry != &tag_hash_table[i])
         {
            current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.TagListEntry);
            current_entry = current_entry->Flink;
            if (CurrentTag == 0)
            {
               /* First block of this pass chooses the tag to collect. */
               CurrentTag = current->Used.Tag;
               CurrentNrBlocks = 0;
               CurrentSize = 0;
            }
            if (current->Used.Tag == CurrentTag)
            {
               /* Move the block to the temporary list so the next pass
                * no longer sees it. */
               RemoveEntryList(&current->Used.TagListEntry);
               InsertHeadList(&tmpListHead, &current->Used.TagListEntry);
               if (!NewOnly || !current->Used.Dumped)
               {
                  CurrentNrBlocks++;
                  TotalBlocks++;
                  CurrentSize += current->Size;
                  TotalSize += current->Size;
                  current->Used.Dumped = TRUE;
               }
            }
         }
         if (CurrentTag != 0 && CurrentNrBlocks != 0)
         {
            MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
         }
      }
      /* Splice the parked blocks back so the bucket is intact again. */
      if (!IsListEmpty(&tmpListHead))
      {
         tag_hash_table[i].Flink = tmpListHead.Flink;
         tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
         tag_hash_table[i].Blink = tmpListHead.Blink;
         tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
      }
   }
   if (TotalBlocks != 0)
   {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
   }
   else
   {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
   }
   /* Free bytes excluding the part of the pool never committed to pages. */
   Size = EiFreeNonPagedPool - (MiNonPagedPoolLength - MiNonPagedPoolNrOfPages * PAGE_SIZE);
   DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
            EiNrFreeBlocks, Size, EiNrFreeBlocks ? Size / EiNrFreeBlocks : 0);
   DbgPrint("***************** Dump Complete ***************\n");
#endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
}
888
VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
/*
 * FUNCTION: Print every block on the in-use list (size, tag, allocator).
 * ARGUMENTS:
 *   NewOnly = if TRUE, only blocks not yet marked Dumped are printed;
 *             every printed block is then marked Dumped.
 * NOTE: Holds the pool spin lock for the whole dump.
 */
{
#ifndef WHOLE_PAGE_ALLOCATIONS
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   KIRQL oldIrql;

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   DbgPrint("******* Dumping non paging pool contents ******\n");
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
      if (!NewOnly || !current->Used.Dumped)
      {
         CHAR c1, c2, c3, c4;

         /* Decode the tag bytes; printed low-to-high if all printable. */
         c1 = (CHAR)((current->Used.Tag >> 24) & 0xFF);
         c2 = (CHAR)((current->Used.Tag >> 16) & 0xFF);
         c3 = (CHAR)((current->Used.Tag >> 8) & 0xFF);
         c4 = (CHAR)(current->Used.Tag & 0xFF);

         if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
         {
            DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                     current->Size, current->Used.Tag, c4, c3, c2, c1,
                     current->Used.Caller);
         }
         else
         {
            DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                     current->Size, current->Used.Tag, current->Used.Caller);
         }
         current->Used.Dumped = TRUE;
      }
      current_entry = current_entry->Flink;
   }
   DbgPrint("***************** Dump Complete ***************\n");
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#endif /* not WHOLE_PAGE_ALLOCATIONS */
}
932
933 #ifndef WHOLE_PAGE_ALLOCATIONS
934
935 #ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 * NOTE: Bugchecks on a bad magic, a block outside the pool bounds, or
 *       more tree nodes than the EiNrFreeBlocks counter allows.
 */
{
   BLOCK_HDR* current;
   unsigned int blocks_seen=0;
   PNODE p;

   /* In-order walk of the free-block AVL tree. */
   p = avl_get_first(FreeBlockListRoot);

   while(p)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
      base_addr = (PVOID)current;

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* Header plus payload must lie entirely inside the pool region.
       * (Arithmetic on PVOID is a GCC extension: byte-sized steps.) */
      if (base_addr < MiNonPagedPoolStart ||
          base_addr + BLOCK_HDR_SIZE + current->Size > MiNonPagedPoolStart + MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  MiNonPagedPoolStart+MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      if (blocks_seen > EiNrFreeBlocks)
      {
         DbgPrint("Too many blocks on free list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }
}
979
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * NOTE: Bugchecks on a bad magic, a block outside the pool bounds, a
 *       count exceeding EiNrUsedBlocks, or broken Flink/Blink linkage.
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
      base_addr = (PVOID)current;

      if (current->Magic != BLOCK_HDR_USED_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* Header plus payload must lie entirely inside the pool region. */
      if (base_addr < MiNonPagedPoolStart ||
          (base_addr+BLOCK_HDR_SIZE+current->Size) >
          MiNonPagedPoolStart+MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  MiNonPagedPoolStart+MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
      {
         DbgPrint("Too many blocks on used list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* Doubly-linked list consistency: next->previous must point back. */
      if (current->Used.ListEntry.Flink != &UsedBlockListHead &&
          current->Used.ListEntry.Flink->Blink != &current->Used.ListEntry)
      {
         DbgPrint("%s:%d:Break in list (current %x next %x "
                  "current->next->previous %x)\n",
                  __FILE__,__LINE__,current, current->Used.ListEntry.Flink,
                  current->Used.ListEntry.Flink->Blink);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }
}
1032
static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if duplicates are found. A "duplicate" is any block
 *       on the free tree or used list whose extent overlaps [base, last).
 *       blk itself starts exactly at base, so it does not self-trigger.
 */
{
   char* base = (char*)blk;
   char* last = ((char*)blk) + BLOCK_HDR_SIZE + blk->Size;
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   PNODE p;

   /* First scan the free-block tree... */
   p = avl_get_first(FreeBlockListRoot);

   while (p)
   {
      current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* Another block starting inside blk's extent... */
      if ( (char*)current > base && (char*)current < last )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* ...or a lower block whose extent reaches into blk. */
      if ( (char*)current < base &&
           ((char*)current + current->Size + BLOCK_HDR_SIZE)
           > base )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }

   /* ...then scan the in-use list with the same overlap tests. */
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);

      if ( (char*)current > base && (char*)current < last )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if ( (char*)current < base &&
           ((char*)current + current->Size + BLOCK_HDR_SIZE)
           > base )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }

}
1097
1098 static void validate_kernel_pool(void)
1099 /*
1100 * FUNCTION: Checks the integrity of the kernel memory heap
1101 */
1102 {
1103 BLOCK_HDR* current;
1104 PLIST_ENTRY current_entry;
1105 PNODE p;
1106
1107 validate_free_list();
1108 validate_used_list();
1109
1110 p = avl_get_first(FreeBlockListRoot);
1111 while (p)
1112 {
1113 current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1114 check_duplicates(current);
1115 p = avl_get_next(FreeBlockListRoot, p);
1116 }
1117 current_entry = UsedBlockListHead.Flink;
1118 while (current_entry != &UsedBlockListHead)
1119 {
1120 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
1121 check_duplicates(current);
1122 current_entry = current_entry->Flink;
1123 }
1124 }
1125 #endif
1126
1127 #if 0
STATIC VOID
free_pages(BLOCK_HDR* blk)
/*
 * Dead code (compiled out by the surrounding #if 0): intended to release
 * whole pages spanned by a freed block, but only the range computation
 * was ever written — 'i' and the page-freeing loop are missing.
 */
{
   ULONG start;
   ULONG end;
   ULONG i;

   start = (ULONG)blk;
   end = (ULONG)blk + BLOCK_HDR_SIZE + blk->Size;

   /*
    * If the block doesn't contain a whole page then there is nothing to do
    */
   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
   {
      return;
   }
}
1146 #endif
1147
1148 static void remove_from_used_list(BLOCK_HDR* current)
1149 {
1150 RemoveEntryList(&current->Used.ListEntry);
1151 EiUsedNonPagedPool -= current->Size;
1152 EiNrUsedBlocks--;
1153 }
1154
1155 static void remove_from_free_list(BLOCK_HDR* current)
1156 {
1157 DPRINT("remove_from_free_list %d\n", current->Size);
1158
1159 avl_remove(&FreeBlockListRoot, &current->Free.Node, compare_node);
1160
1161 EiFreeNonPagedPool -= current->Size;
1162 EiNrFreeBlocks--;
1163 DPRINT("remove_from_free_list done\n");
1164 #ifdef DUMP_AVL
1165
1166 DumpFreeBlockTree();
1167 #endif
1168 }
1169
static void
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 * NOTE: Coalesces with the physically previous and next block when
 *       either is free, then inserts the (possibly merged) block into
 *       the free-block AVL tree and updates the counters.
 */
{
   BLOCK_HDR* current;
   BOOL UpdatePrevPtr = FALSE;

   DPRINT("add_to_free_list (%d)\n", blk->Size);

   EiNrFreeBlocks++;

   /* Merge with the physically previous block if it is free. */
   current = blk->previous;
   if (current && current->Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      current->Size = current->Size + BLOCK_HDR_SIZE + blk->Size;
      /* Temporarily marked used; set back to BLOCK_HDR_FREE_MAGIC below. */
      current->Magic = BLOCK_HDR_USED_MAGIC;
      /* Poison the now-absorbed header. */
      memset(blk, 0xcc, BLOCK_HDR_SIZE);
      blk = current;
      UpdatePrevPtr = TRUE;
   }

   /* Merge with the physically next block if it is free. */
   current = (BLOCK_HDR*)((char*)blk + BLOCK_HDR_SIZE + blk->Size);
   if ((char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength &&
       current->Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      blk->Size += BLOCK_HDR_SIZE + current->Size;
      memset(current, 0xcc, BLOCK_HDR_SIZE);
      UpdatePrevPtr = TRUE;
      current = (BLOCK_HDR*)((char*)blk + BLOCK_HDR_SIZE + blk->Size);
   }
   /* After a merge the follower's back-pointer must track the new header. */
   if (UpdatePrevPtr &&
       (char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
   {
      current->previous = blk;
   }
   DPRINT("%d\n", blk->Size);
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   EiFreeNonPagedPool += blk->Size;
   avl_insert(&FreeBlockListRoot, &blk->Free.Node, compare_node);

   DPRINT("add_to_free_list done\n");
#ifdef DUMP_AVL

   DumpFreeBlockTree();
#endif
}
1220
1221 static void add_to_used_list(BLOCK_HDR* blk)
1222 /*
1223 * FUNCTION: add the block to the used list (internal)
1224 */
1225 {
1226 InsertHeadList(&UsedBlockListHead, &blk->Used.ListEntry);
1227 EiUsedNonPagedPool += blk->Size;
1228 EiNrUsedBlocks++;
1229 }
1230
1231 inline static void* block_to_address(BLOCK_HDR* blk)
1232 /*
1233 * FUNCTION: Translate a block header address to the corresponding block
1234 * address (internal)
1235 */
1236 {
1237 return ( (void *) ((char*)blk + BLOCK_HDR_SIZE));
1238 }
1239
1240 inline static BLOCK_HDR* address_to_block(void* addr)
1241 {
1242 return (BLOCK_HDR *)
1243 ( ((char*)addr) - BLOCK_HDR_SIZE );
1244 }
1245
static BOOLEAN
grow_block(BLOCK_HDR* blk, PVOID end)
/*
 * FUNCTION: Commit physical pages behind the pool range from the first
 * whole page after blk's header up to 'end'.
 * RETURNS: TRUE on success; FALSE if page allocation or mapping fails
 *          (all pages acquired for the failing run are released).
 * NOTE: MiNonPagedPoolAllocMap holds one bit per pool page; runs of up
 *       to 32 missing pages are mapped in a single call.
 */
{
   NTSTATUS Status;
   PFN_TYPE Page[32];
   ULONG StartIndex, EndIndex;
   ULONG i, j, k;

   /* Page indices (relative to the pool base) that need to be present. */
   StartIndex = (ULONG)((char*)(PVOID)PAGE_ROUND_UP((ULONG)((char*)blk + BLOCK_HDR_SIZE)) - (char*)MiNonPagedPoolStart) / PAGE_SIZE;
   EndIndex = (ULONG)((char*)PAGE_ROUND_UP(end) - (char*)MiNonPagedPoolStart) / PAGE_SIZE;


   for (i = StartIndex; i < EndIndex; i++)
   {
      if (!(MiNonPagedPoolAllocMap[i / 32] & (1 << (i % 32))))
      {
         /* Found an uncommitted page; extend j over the run of missing
          * pages, capped at 32 (the size of the Page[] batch). */
         for (j = i + 1; j < EndIndex && j - i < 32; j++)
         {
            if (MiNonPagedPoolAllocMap[j / 32] & (1 << (j % 32)))
            {
               break;
            }
         }
         /* Acquire j - i physical pages; k counts how many we hold. */
         for (k = 0; k < j - i; k++)
         {
            Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page[k]);
            if (!NT_SUCCESS(Status))
            {
               /* Roll back this batch ('i' is reused as the rollback
                * index; safe only because we return immediately). */
               for (i = 0; i < k; i++)
               {
                  MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
               }
               return FALSE;
            }
         }
         /* Map the whole batch at its place in the pool region. */
         Status = MmCreateVirtualMapping(NULL,
                                         MiNonPagedPoolStart + i * PAGE_SIZE,
                                         PAGE_READWRITE|PAGE_SYSTEM,
                                         Page,
                                         k);
         if (!NT_SUCCESS(Status))
         {
            /* Same rollback pattern as above. */
            for (i = 0; i < k; i++)
            {
               MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
            }
            return FALSE;
         }
         /* Mark the pages committed and skip past the batch. */
         for (j = i; j < k + i; j++)
         {
            MiNonPagedPoolAllocMap[j / 32] |= (1 << (j % 32));
         }
         MiNonPagedPoolNrOfPages += k;
         i += k - 1;
      }
   }
   return TRUE;
}
1304
1305 static BLOCK_HDR* get_block(unsigned int size, unsigned long alignment)
1306 {
1307 BLOCK_HDR *blk, *current, *previous = NULL, *next = NULL, *best = NULL;
1308 ULONG previous_size = 0, current_size, next_size = 0, new_size;
1309 PVOID end;
1310 PVOID addr, aligned_addr;
1311 PNODE p;
1312
1313 DPRINT("get_block %d\n", size);
1314
1315 if (alignment == 0)
1316 {
1317 p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
1318 if (p)
1319 {
1320 best = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1321 addr = block_to_address(best);
1322 }
1323 }
1324 else
1325 {
1326 p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
1327
1328 while(p)
1329 {
1330 current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1331 addr = block_to_address(current);
1332 /* calculate first aligned address available within this block */
1333 aligned_addr = MM_ROUND_UP(addr, alignment);
1334 /* check to see if this address is already aligned */
1335 if (addr == aligned_addr)
1336 {
1337 if (current->Size >= size &&
1338 (best == NULL || current->Size < best->Size))
1339 {
1340 best = current;
1341 }
1342 }
1343 else
1344 {
1345 /* make sure there's enough room to make a free block by the space skipped
1346 * from alignment. If not, calculate forward to the next alignment
1347 * and see if we allocate there...
1348 */
1349 new_size = (ULONG)aligned_addr - (ULONG)addr + size;
1350 if ((ULONG)aligned_addr - (ULONG)addr < BLOCK_HDR_SIZE)
1351 {
1352 /* not enough room for a free block header, add some more bytes */
1353 aligned_addr = MM_ROUND_UP(block_to_address((BLOCK_HDR*)((char*)current + BLOCK_HDR_SIZE)), alignment);
1354 new_size = (ULONG)aligned_addr - (ULONG)addr + size;
1355 }
1356 if (current->Size >= new_size &&
1357 (best == NULL || current->Size < best->Size))
1358 {
1359 best = current;
1360 }
1361 }
1362 if (best && current->Size >= size + alignment + 2 * BLOCK_HDR_SIZE)
1363 {
1364 break;
1365 }
1366 p = avl_get_next(FreeBlockListRoot, p);
1367
1368 }
1369 }
1370
1371 /*
1372 * We didn't find anything suitable at all.
1373 */
1374 if (best == NULL)
1375 {
1376 return NULL;
1377 }
1378
1379 current = best;
1380 current_size = current->Size;
1381
1382 if (alignment > 0)
1383 {
1384 addr = block_to_address(current);
1385 aligned_addr = MM_ROUND_UP(addr, alignment);
1386 if (addr != aligned_addr)
1387 {
1388 blk = address_to_block(aligned_addr);
1389 if ((char*)blk < (char*)current + BLOCK_HDR_SIZE)
1390 {
1391 aligned_addr = MM_ROUND_UP(block_to_address((BLOCK_HDR*)((char*)current + BLOCK_HDR_SIZE)), alignment);
1392 blk = address_to_block(aligned_addr);
1393 }
1394 /*
1395 * if size-aligned, break off the preceding bytes into their own block...
1396 */
1397 previous = current;
1398 previous_size = (ULONG)blk - (ULONG)previous - BLOCK_HDR_SIZE;
1399 current = blk;
1400 current_size -= ((ULONG)current - (ULONG)previous);
1401 }
1402 }
1403
1404 end = (char*)current + BLOCK_HDR_SIZE + size;
1405
1406 if (current_size >= size + BLOCK_HDR_SIZE + MM_POOL_ALIGNMENT)
1407 {
1408 /* create a new free block after our block, if the memory size is >= 4 byte for this block */
1409 next = (BLOCK_HDR*)((ULONG)current + size + BLOCK_HDR_SIZE);
1410 next_size = current_size - size - BLOCK_HDR_SIZE;
1411 current_size = size;
1412 end = (char*)next + BLOCK_HDR_SIZE;
1413 }
1414
1415 if (previous)
1416 {
1417 remove_from_free_list(previous);
1418 if (!grow_block(previous, end))
1419 {
1420 add_to_free_list(previous);
1421 return NULL;
1422 }
1423 memset(current, 0, BLOCK_HDR_SIZE);
1424 current->Size = current_size;
1425 current->Magic = BLOCK_HDR_USED_MAGIC;
1426 current->previous = previous;
1427 previous->Size = previous_size;
1428 if (next == NULL)
1429 {
1430 blk = (BLOCK_HDR*)((char*)current + BLOCK_HDR_SIZE + current->Size);
1431 if ((char*)blk < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
1432 {
1433 blk->previous = current;
1434 }
1435 }
1436
1437 add_to_free_list(previous);
1438 }
1439 else
1440 {
1441 remove_from_free_list(current);
1442
1443 if (!grow_block(current, end))
1444 {
1445 add_to_free_list(current);
1446 return NULL;
1447 }
1448
1449 current->Magic = BLOCK_HDR_USED_MAGIC;
1450 if (next)
1451 {
1452 current->Size = current_size;
1453 }
1454 }
1455
1456 if (next)
1457 {
1458 memset(next, 0, BLOCK_HDR_SIZE);
1459
1460 next->Size = next_size;
1461 next->Magic = BLOCK_HDR_FREE_MAGIC;
1462 next->previous = current;
1463 blk = (BLOCK_HDR*)((char*)next + BLOCK_HDR_SIZE + next->Size);
1464 if ((char*)blk < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
1465 {
1466 blk->previous = next;
1467 }
1468 add_to_free_list(next);
1469 }
1470
1471 add_to_used_list(current);
1472 VALIDATE_POOL;
1473 return current;
1474 }
1475
1476 #endif /* not WHOLE_PAGE_ALLOCATIONS */
1477
1478 VOID STDCALL ExFreeNonPagedPool (PVOID block)
1479 /*
1480 * FUNCTION: Releases previously allocated memory
1481 * ARGUMENTS:
1482 * block = block to free
1483 */
1484 {
1485 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
1486 KIRQL oldIrql;
1487
1488 if (block == NULL)
1489 {
1490 return;
1491 }
1492
1493 DPRINT("freeing block %x\n",block);
1494
1495 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
1496 ((PULONG)&block)[-1]);
1497
1498 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1499
1500 ExFreeWholePageBlock(block);
1501 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1502
1503 #else /* not WHOLE_PAGE_ALLOCATIONS */
1504
1505 BLOCK_HDR* blk=address_to_block(block);
1506 KIRQL oldIrql;
1507
1508 if (block == NULL)
1509 {
1510 return;
1511 }
1512
1513 DPRINT("freeing block %x\n",blk);
1514
1515 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->Size,
1516 ((PULONG)&block)[-1]);
1517
1518 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1519
1520 VALIDATE_POOL;
1521
1522 if (blk->Magic != BLOCK_HDR_USED_MAGIC)
1523 {
1524 if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
1525 {
1526 DbgPrint("ExFreePool of already freed address %x\n", block);
1527 }
1528 else
1529 {
1530 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1531 block, blk->Magic);
1532 }
1533 KEBUGCHECK(0);
1534 return;
1535 }
1536
1537 memset(block, 0xcc, blk->Size);
1538 #ifdef TAG_STATISTICS_TRACKING
1539
1540 MiRemoveFromTagHashTable(blk);
1541 #endif
1542
1543 remove_from_used_list(blk);
1544 blk->Magic = BLOCK_HDR_FREE_MAGIC;
1545 add_to_free_list(blk);
1546 VALIDATE_POOL;
1547 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1548
1549 #endif /* WHOLE_PAGE_ALLOCATIONS */
1550 }
1551
1552 PVOID STDCALL
1553 ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
1554 {
1555 #ifdef WHOLE_PAGE_ALLOCATIONS
1556 PVOID block;
1557 KIRQL oldIrql;
1558
1559 ASSERT_IRQL(DISPATCH_LEVEL);
1560
1561 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1562 Size,Caller);
1563
1564 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1565
1566 /*
1567 * accomodate this useful idiom
1568 */
1569 if (Size == 0)
1570 {
1571 POOL_TRACE("= NULL\n");
1572 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1573 return(NULL);
1574 }
1575
1576 block = ExAllocateWholePageBlock(Size);
1577 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1578 if (NULL == block)
1579 {
1580 DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
1581 Size );
1582 }
1583 return(block);
1584
1585 #else /* not WHOLE_PAGE_ALLOCATIONS */
1586
1587 PVOID block;
1588 BLOCK_HDR* best = NULL;
1589 KIRQL oldIrql;
1590 ULONG alignment;
1591
1592 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1593 Size,Caller);
1594
1595 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1596
1597 VALIDATE_POOL;
1598
1599 #if 0
1600 /* after some allocations print the npaged pool stats */
1601 #ifdef TAG_STATISTICS_TRACKING
1602
1603 {
1604 static ULONG counter = 0;
1605 if (counter++ % 100000 == 0)
1606 {
1607 MiDebugDumpNonPagedPoolStats(FALSE);
1608 }
1609 }
1610 #endif
1611 #endif
1612 /*
1613 * accomodate this useful idiom
1614 */
1615 if (Size == 0)
1616 {
1617 POOL_TRACE("= NULL\n");
1618 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1619 return(NULL);
1620 }
1621 /* Make the size dword alligned, this makes the block dword alligned */
1622 Size = ROUND_UP(Size, MM_POOL_ALIGNMENT);
1623
1624 if (Size >= PAGE_SIZE)
1625 {
1626 alignment = PAGE_SIZE;
1627 }
1628 else if (Type == NonPagedPoolCacheAligned ||
1629 Type == NonPagedPoolCacheAlignedMustS)
1630 {
1631 alignment = MM_CACHE_LINE_SIZE;
1632 }
1633 else
1634 {
1635 alignment = 0;
1636 }
1637
1638 best = get_block(Size, alignment);
1639 if (best == NULL)
1640 {
1641 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1642 DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
1643 Size );
1644 return NULL;
1645 }
1646 best->Used.Tag = Tag;
1647 best->Used.Caller = Caller;
1648 best->Used.Dumped = FALSE;
1649 best->Used.TagListEntry.Flink = best->Used.TagListEntry.Blink = NULL;
1650 #ifdef TAG_STATISTICS_TRACKING
1651
1652 MiAddToTagHashTable(best);
1653 #endif
1654
1655 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1656 block = block_to_address(best);
1657 /* RtlZeroMemory(block, Size);*/
1658 return(block);
1659 #endif /* WHOLE_PAGE_ALLOCATIONS */
1660 }
1661
1662 #ifdef WHOLE_PAGE_ALLOCATIONS
1663
1664 PVOID STDCALL
1665 ExAllocateWholePageBlock(ULONG Size)
1666 {
1667 PVOID Address;
1668 PFN_TYPE Page;
1669 ULONG i;
1670 ULONG NrPages;
1671 ULONG Base;
1672
1673 NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;
1674
1675 Base = RtlFindClearBitsAndSet(&NonPagedPoolAllocMap, NrPages + 1, NonPagedPoolAllocMapHint);
1676 if (Base == 0xffffffff)
1677 {
1678 DbgPrint("Out of non paged pool space.\n");
1679 KEBUGCHECK(0);
1680 }
1681 if (NonPagedPoolAllocMapHint == Base)
1682 {
1683 NonPagedPoolAllocMapHint += (NrPages + 1);
1684 }
1685
1686 Address = MiNonPagedPoolStart + Base * PAGE_SIZE;
1687
1688 for (i = 0; i < NrPages; i++)
1689 {
1690 Page = MmAllocPage(MC_NPPOOL, 0);
1691 if (Page == 0)
1692 {
1693 KEBUGCHECK(0);
1694 }
1695 MmCreateVirtualMapping(NULL,
1696 Address + (i * PAGE_SIZE),
1697 PAGE_READWRITE | PAGE_SYSTEM,
1698 &Page,
1699 TRUE);
1700 }
1701
1702 MiCurrentNonPagedPoolLength = max(MiCurrentNonPagedPoolLength, (Base + NrPages) * PAGE_SIZE);
1703 Size = (Size + 7) & ~7;
1704 Address = ((PVOID)((PUCHAR)Address + (NrPages * PAGE_SIZE) - Size));
1705
1706 DPRINT("WPALLOC: %x (%d)\n", Address, Size);
1707
1708 return Address;
1709 }
1710
/*
 * FUNCTION: Releases a block allocated by ExAllocateWholePageBlock.
 * ARGUMENTS:
 *      Addr = address previously returned by ExAllocateWholePageBlock;
 *             may point into the LAST page of the run (the allocator
 *             places blocks at the end of their page range).
 * Unmaps every present page from the block's first page onward and
 * clears the matching bits in the allocation bitmap. The loop stops at
 * the first non-present page, which is presumably the unmapped guard
 * page that terminates each allocation — TODO confirm that no two
 * allocations can be adjacent without a guard page between them.
 */
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr)
{
   ULONG Base;

   /* Sanity check: the address must lie inside the committed pool area. */
   if (Addr < MiNonPagedPoolStart ||
       Addr >= (MiNonPagedPoolStart + MiCurrentNonPagedPoolLength))
   {
      DbgPrint("Block %x found outside pool area\n", Addr);
      KEBUGCHECK(0);
   }
   Base = (Addr - MiNonPagedPoolStart) / PAGE_SIZE;
   /* Let the next bitmap search start no later than this freed slot. */
   NonPagedPoolAllocMapHint = min(NonPagedPoolAllocMapHint, Base);
   while (MmIsPagePresent(NULL, Addr))
   {
      MmDeleteVirtualMapping(NULL,
                             Addr,
                             TRUE,
                             NULL,
                             NULL);
      RtlClearBits(&NonPagedPoolAllocMap, Base, 1);
      Base++;
      Addr += PAGE_SIZE;
   }
}
1736
1737 #endif /* WHOLE_PAGE_ALLOCATIONS */
1738
1739 /* Whole Page Allocations note:
1740 *
1741 * We need enough pages for these things:
1742 *
1743 * 1) bitmap buffer
1744 * 2) hdr
1745 * 3) actual pages
1746 *
1747 */
/*
 * FUNCTION: One-time initialization of the kernel nonpaged pool.
 * Sets up the pool spinlock and bookkeeping lists, commits the initial
 * pages that hold the pool's own allocation bitmap, and (in the block
 * allocator configuration) creates the two bootstrap blocks: a used
 * block containing the bitmap and the first free block covering the
 * rest of the pool range.
 */
VOID INIT_FUNCTION
MiInitializeNonPagedPool(VOID)
{
   NTSTATUS Status;
   PFN_TYPE Page;
   ULONG i;
   PVOID Address;
#ifdef WHOLE_PAGE_ALLOCATIONS
#else

   BLOCK_HDR* blk;
#endif
#ifdef TAG_STATISTICS_TRACKING

   /* One bucket list per hash slot for per-tag allocation statistics. */
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tag_hash_table[i]);
   }
#endif
   KeInitializeSpinLock(&MmNpoolLock);
   InitializeListHead(&UsedBlockListHead);
   InitializeListHead(&AddressListHead);
   FreeBlockListRoot = NULL;
#ifdef WHOLE_PAGE_ALLOCATIONS

   /* The first pages of the pool hold the allocation bitmap itself
    * (one bit per pool page); the hint starts just past them. */
   NonPagedPoolAllocMapHint = PAGE_ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE / 8) / PAGE_SIZE; /* Pages of bitmap buffer */
   MiCurrentNonPagedPoolLength = NonPagedPoolAllocMapHint * PAGE_SIZE;
   Address = MiNonPagedPoolStart;
   for (i = 0; i < NonPagedPoolAllocMapHint; i++)
   {
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to allocate a page\n");
         KEBUGCHECK(0);
      }
      Status = MmCreateVirtualMapping(NULL,
                                      Address,
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      &Page,
                                      1);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
      Address += PAGE_SIZE;
   }
   RtlInitializeBitMap(&NonPagedPoolAllocMap, MiNonPagedPoolStart,
                       MiNonPagedPoolLength / PAGE_SIZE);
   RtlClearAllBits(&NonPagedPoolAllocMap);
   /* Mark the bitmap's own pages as allocated. */
   RtlSetBits(&NonPagedPoolAllocMap, 0, NonPagedPoolAllocMapHint);
#else

   /* The page bitmap lives in the data area of the pool's first block. */
   MiNonPagedPoolAllocMap = block_to_address((BLOCK_HDR*)MiNonPagedPoolStart);
   /* Pages needed up front: bitmap (1 bit per pool page, rounded to
    * 32-bit words) plus two block headers (bitmap block + first free). */
   MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + 2 * BLOCK_HDR_SIZE);
   MiNonPagedPoolNrOfPages /= PAGE_SIZE;
   Address = MiNonPagedPoolStart;
   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to allocate a page\n");
         KEBUGCHECK(0);
      }
      Status = MmCreateVirtualMapping(NULL,
                                      Address,
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      &Page,
                                      1);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
      /* Record this page as committed in the bitmap. */
      MiNonPagedPoolAllocMap[i / 32] |= (1 << (i % 32));
      Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
   }
   /* the first block contains the non paged pool bitmap */
   blk = (BLOCK_HDR*)MiNonPagedPoolStart;
   memset(blk, 0, BLOCK_HDR_SIZE);
   blk->Magic = BLOCK_HDR_USED_MAGIC;
   blk->Size = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8;
   blk->previous = NULL;
   blk->Used.Tag = 0xffffffff; /* pseudo-tag: internal bookkeeping block */
   blk->Used.Caller = 0;
   blk->Used.Dumped = FALSE;
   add_to_used_list(blk);
#ifdef TAG_STATISTICS_TRACKING

   MiAddToTagHashTable(blk);
#endif
   /* the second block is the first free block */
   blk = (BLOCK_HDR*)((char*)blk + BLOCK_HDR_SIZE + blk->Size);
   memset(blk, 0, BLOCK_HDR_SIZE);
   /* Poison the committed-but-free area (NOTE(review): the literal is
    * written '0x0cc'; memset truncates it to the usual 0xcc fill byte). */
   memset((char*)blk + BLOCK_HDR_SIZE, 0x0cc, MiNonPagedPoolNrOfPages * PAGE_SIZE - ((ULONG)blk + BLOCK_HDR_SIZE - (ULONG)MiNonPagedPoolStart));
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   blk->Size = MiNonPagedPoolLength - ((ULONG)blk + BLOCK_HDR_SIZE - (ULONG)MiNonPagedPoolStart);
   blk->previous = (BLOCK_HDR*)MiNonPagedPoolStart;
   add_to_free_list(blk);
#endif
}
1851
1852 PVOID
1853 STDCALL
1854 MiAllocateSpecialPool (IN POOL_TYPE PoolType,
1855 IN SIZE_T NumberOfBytes,
1856 IN ULONG Tag,
1857 IN ULONG Underrun
1858 )
1859 {
1860 /* FIXME: Special Pools not Supported */
1861 DbgPrint("Special Pools not supported\n");
1862 return 0;
1863 }
1864
1865 /* EOF */