30fea119881e41caf0a35c31a7bb095532a46057
[reactos.git] / reactos / ntoskrnl / mm / npool.c
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
8 * UPDATE HISTORY:
9 * 27/05/98: Created
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
13 * in ExFreePool
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
15 */
16
17 /* INCLUDES ****************************************************************/
18
19 #include <ntoskrnl.h>
20 #define NDEBUG
21 #include <internal/debug.h>
22
23 #ifdef ENABLE_VALIDATE_POOL
24 #define VALIDATE_POOL validate_kernel_pool()
25 #else
26 #define VALIDATE_POOL
27 #endif
28
29 #if 0
30 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
31 #else
32 #if defined(__GNUC__)
33 #define POOL_TRACE(args...)
34 #else
35 #define POOL_TRACE
36 #endif /* __GNUC__ */
37 #endif
38
39 /* avl types ****************************************************************/
40
41 /* FIXME:
42 * This declarations should be moved into a separate header file.
43 */
44
/* AVL tree node embedded in free pool blocks (see BLOCK_HDR.Free.Node). */
typedef struct _NODE
{
   struct _NODE* link[2];  /* children: link[0] = left, link[1] = right */
   struct _NODE* parent;   /* NULL for the tree root */
   signed char balance;    /* AVL balance factor; -2/+2 only transiently during rebalancing */
}
NODE, *PNODE;
52
53 /* TYPES *******************************************************************/
54
55 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
56 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
57
58 /*
59 * fields present at the start of a block (this is for internal use only)
60 */
typedef struct _BLOCK_HDR
{
   ULONG Magic;                /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
   ULONG Size;                 /* payload size in bytes, excluding this header */
   struct _BLOCK_HDR* previous; /* physically preceding block in the pool, or NULL */
   union
   {
      /* Valid while Magic == BLOCK_HDR_USED_MAGIC. */
      struct
      {
         LIST_ENTRY ListEntry;    /* link in UsedBlockListHead */
         ULONG Tag;               /* four-character pool tag (0 = untagged) */
         PVOID Caller;            /* allocator's return address, for debugging */
         LIST_ENTRY TagListEntry; /* link in tag_hash_table bucket */
         BOOLEAN Dumped;          /* TRUE once shown by a "NewOnly" debug dump */
      }
      Used;
      /* Valid while Magic == BLOCK_HDR_FREE_MAGIC. */
      struct
      {
         NODE Node;               /* node in the size-ordered free-block AVL tree */
      }
      Free;
   };
}
BLOCK_HDR;
85
86 #define BLOCK_HDR_SIZE ROUND_UP(sizeof(BLOCK_HDR), MM_POOL_ALIGNMENT)
87
88 PVOID STDCALL
89 ExAllocateWholePageBlock(ULONG Size);
90 VOID STDCALL
91 ExFreeWholePageBlock(PVOID Addr);
92
93 /* GLOBALS *****************************************************************/
94
95 extern PVOID MiNonPagedPoolStart;
96 extern ULONG MiNonPagedPoolLength;
97
98 /*
99 * Head of the list of free blocks
100 */
101 static PNODE FreeBlockListRoot = NULL;
102
103 /*
104 * Head of the list of in use block
105 */
106 static LIST_ENTRY UsedBlockListHead;
107
108 static LIST_ENTRY AddressListHead;
109
110 #ifndef WHOLE_PAGE_ALLOCATIONS
111 /*
112 * Count of free blocks
113 */
114 static ULONG EiNrFreeBlocks = 0;
115
116 /*
117 * Count of used blocks
118 */
119 static ULONG EiNrUsedBlocks = 0;
120 #endif
121
122 /*
123 * Lock that protects the non-paged pool data structures
124 */
125 static KSPIN_LOCK MmNpoolLock;
126
127 /*
128 * Total memory used for free nonpaged pool blocks
129 */
130 ULONG EiFreeNonPagedPool = 0;
131
132 /*
133 * Total memory used for nonpaged pool blocks
134 */
135 ULONG EiUsedNonPagedPool = 0;
136
137 /* Total quota for Non Paged Pool */
138 ULONG MmTotalNonPagedPoolQuota = 0;
139
140 /*
141 * Allocate a range of memory in the nonpaged pool
142 */
143 PVOID
144 MiAllocNonPagedPoolRegion(unsigned int nr_pages);
145
146 VOID
147 MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);
148
149 #ifdef TAG_STATISTICS_TRACKING
150 #define TAG_HASH_TABLE_SIZE (1024)
151 static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
152 #endif /* TAG_STATISTICS_TRACKING */
153
154 #ifdef WHOLE_PAGE_ALLOCATIONS
155 static RTL_BITMAP NonPagedPoolAllocMap;
156 static ULONG NonPagedPoolAllocMapHint;
157 static ULONG MiCurrentNonPagedPoolLength = 0;
158 #else
159 static PULONG MiNonPagedPoolAllocMap;
160 static ULONG MiNonPagedPoolNrOfPages;
161 #endif /* WHOLE_PAGE_ALLOCATIONS */
162
163 /* avl helper functions ****************************************************/
164
165 void DumpFreeBlockNode(PNODE p)
166 {
167 static int count = 0;
168 BLOCK_HDR* blk;
169
170 count++;
171
172 if (p)
173 {
174 DumpFreeBlockNode(p->link[0]);
175 blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
176 DbgPrint("%08x %8d (%d)\n", blk, blk->Size, count);
177 DumpFreeBlockNode(p->link[1]);
178 }
179
180 count--;
181 }
/* Print the whole free-block AVL tree (root header first, then in-order dump). */
void DumpFreeBlockTree(void)
{
   DbgPrint("--- Begin tree ------------------\n");
   /* NOTE(review): if FreeBlockListRoot is NULL this prints a bogus
    * CONTAINING_RECORD offset rather than NULL — debug aid only. */
   DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, BLOCK_HDR, Free.Node));
   DumpFreeBlockNode(FreeBlockListRoot);
   DbgPrint("--- End tree --------------------\n");
}
189
190 int compare_node(PNODE p1, PNODE p2)
191 {
192 BLOCK_HDR* blk1 = CONTAINING_RECORD(p1, BLOCK_HDR, Free.Node);
193 BLOCK_HDR* blk2 = CONTAINING_RECORD(p2, BLOCK_HDR, Free.Node);
194
195 if (blk1->Size == blk2->Size)
196 {
197 if (blk1 < blk2)
198 {
199 return -1;
200 }
201 if (blk1 > blk2)
202 {
203 return 1;
204 }
205 }
206 else
207 {
208 if (blk1->Size < blk2->Size)
209 {
210 return -1;
211 }
212 if (blk1->Size > blk2->Size)
213 {
214 return 1;
215 }
216 }
217 return 0;
218
219 }
220
221 int compare_value(PVOID value, PNODE p)
222 {
223 BLOCK_HDR* blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
224 ULONG v = *(PULONG)value;
225
226 if (v < blk->Size)
227 {
228 return -1;
229 }
230 if (v > blk->Size)
231 {
232 return 1;
233 }
234 return 0;
235 }
236
237 /* avl functions **********************************************************/
238
239 /* FIXME:
240 * The avl functions should be moved into a separate file.
241 */
242
243 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
244
/*
 * Insert node |n| into the AVL tree at |*root|, ordered by |compare|,
 * then restore the AVL balance invariant with at most one (possibly
 * double) rotation.  Based on libavl's pavl_probe.
 */
void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
{
   PNODE y; /* Top node to update balance factor, and parent. */
   PNODE p, q; /* Iterator, and parent. */
   PNODE w; /* New root of rebalanced subtree. */
   int dir = 0; /* Direction to descend. */

   n->link[0] = n->link[1] = n->parent = NULL;
   n->balance = 0;

   /* Descend to the insertion point, remembering in |y| the deepest
    * node with a non-zero balance factor: rebalancing can only be
    * needed at or below that node. */
   y = *root;
   for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
   {
      dir = compare(n, p) > 0;
      if (p->balance != 0)
      {
         y = p;
      }
   }

   /* Attach |n| under its parent |q| (or make it the root). */
   n->parent = q;
   if (q != NULL)
   {
      q->link[dir] = n;
   }
   else
   {
      *root = n;
   }

   /* A new root (empty tree before) needs no rebalancing. */
   if (*root == n)
   {
      return;
   }

   /* Walk back up from |n| to |y| adjusting balance factors along the
    * insertion path. */
   for (p = n; p != y; p = q)
   {
      q = p->parent;
      dir = q->link[0] != p;
      if (dir == 0)
      {
         q->balance--;
      }
      else
      {
         q->balance++;
      }
   }

   if (y->balance == -2)
   {
      /* Left-heavy by two: rotate right (single or double). */
      PNODE x = y->link[0];
      if (x->balance == -1)
      {
         /* Single right rotation. */
         w = x;
         y->link[0] = x->link[1];
         x->link[1] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
      else
      {
         /* Double rotation: left around |x|, then right around |y|. */
         ASSERT(x->balance == +1);
         w = x->link[1];
         x->link[1] = w->link[0];
         w->link[0] = x;
         y->link[0] = w->link[1];
         w->link[1] = y;
         if (w->balance == -1)
         {
            x->balance = 0;
            y->balance = +1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == +1| */
         {
            x->balance = -1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[1] != NULL)
         {
            x->link[1]->parent = x;
         }
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
   }
   else if (y->balance == +2)
   {
      /* Right-heavy by two: mirror image of the case above. */
      PNODE x = y->link[1];
      if (x->balance == +1)
      {
         /* Single left rotation. */
         w = x;
         y->link[1] = x->link[0];
         x->link[0] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
      else
      {
         /* Double rotation: right around |x|, then left around |y|. */
         ASSERT(x->balance == -1);
         w = x->link[0];
         x->link[0] = w->link[1];
         w->link[1] = x;
         y->link[1] = w->link[0];
         w->link[0] = y;
         if (w->balance == 1)
         {
            x->balance = 0;
            y->balance = -1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == -1| */
         {
            x->balance = +1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[0] != NULL)
         {
            x->link[0]->parent = x;
         }
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
   }
   else
   {
      /* Tree still balanced; nothing to rotate. */
      return;
   }
   /* Reconnect the rebalanced subtree |w| to |y|'s old parent. */
   if (w->parent != NULL)
   {
      w->parent->link[y != w->parent->link[0]] = w;
   }
   else
   {
      *root = w;
   }

   return;
}
411
/*
 * Remove |item| from the AVL tree at |*root| and rebalance on the way
 * back up.  Based on libavl's pavl_delete.  |compare| is only used to
 * determine on which side of its parent |item| hangs.
 */
void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
{
   PNODE p; /* Traverses tree to find node to delete. */
   PNODE q; /* Parent of |p|. */
   int dir; /* Side of |q| on which |p| is linked. */

   if (root == NULL || *root == NULL)
   {
      return ;
   }

   p = item;
   q = p->parent;
   if (q == NULL)
   {
      /* |p| is the root: treat the root pointer itself as the "parent"
       * (works because link[0] aliases the first struct member). */
      q = (PNODE) root;
      dir = 0;
   }
   else
   {
      dir = compare(p, q) > 0;
   }

   if (p->link[1] == NULL)
   {
      /* Case 1: no right child — splice the left child into |p|'s place. */
      q->link[dir] = p->link[0];
      if (q->link[dir] != NULL)
      {
         q->link[dir]->parent = p->parent;
      }
   }
   else
   {
      PNODE r = p->link[1];
      if (r->link[0] == NULL)
      {
         /* Case 2: right child has no left child — it replaces |p| directly. */
         r->link[0] = p->link[0];
         q->link[dir] = r;
         r->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         r->balance = p->balance;
         q = r;
         dir = 1;
      }
      else
      {
         /* Case 3: replace |p| with its in-order successor |s| (leftmost
          * node of the right subtree); rebalancing starts at |s|'s old
          * parent |r|. */
         PNODE s = r->link[0];
         while (s->link[0] != NULL)
         {
            s = s->link[0];
         }
         r = s->parent;
         r->link[0] = s->link[1];
         s->link[0] = p->link[0];
         s->link[1] = p->link[1];
         q->link[dir] = s;
         if (s->link[0] != NULL)
         {
            s->link[0]->parent = s;
         }
         s->link[1]->parent = s;
         s->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         s->balance = p->balance;
         q = r;
         dir = 0;
      }
   }

   /* Detach |item| completely. */
   item->link[0] = item->link[1] = item->parent = NULL;
   item->balance = 0;

   /* Walk from the splice point back to the root, updating balance
    * factors and rotating where a subtree became unbalanced.  The walk
    * stops early when a subtree's height is unchanged. */
   while (q != (PNODE) root)
   {
      PNODE y = q;

      if (y->parent != NULL)
      {
         q = y->parent;
      }
      else
      {
         q = (PNODE) root;
      }

      if (dir == 0)
      {
         /* Deleted from the left subtree of |y|. */
         dir = q->link[0] != y;
         y->balance++;
         if (y->balance == +1)
         {
            break;
         }
         else if (y->balance == +2)
         {
            PNODE x = y->link[1];
            if (x->balance == -1)
            {
               /* Double rotation (right-left). */
               PNODE w;

               ASSERT(x->balance == -1);
               w = x->link[0];
               x->link[0] = w->link[1];
               w->link[1] = x;
               y->link[1] = w->link[0];
               w->link[0] = y;
               if (w->balance == +1)
               {
                  x->balance = 0;
                  y->balance = -1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == -1| */
               {
                  x->balance = +1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[0] != NULL)
               {
                  x->link[0]->parent = x;
               }
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* Single left rotation. */
               y->link[1] = x->link[0];
               x->link[0] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  x->balance = -1;
                  y->balance = +1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
      else
      {
         /* Deleted from the right subtree of |y|: mirror image. */
         dir = q->link[0] != y;
         y->balance--;
         if (y->balance == -1)
         {
            break;
         }
         else if (y->balance == -2)
         {
            PNODE x = y->link[0];
            if (x->balance == +1)
            {
               /* Double rotation (left-right). */
               PNODE w;
               ASSERT(x->balance == +1);
               w = x->link[1];
               x->link[1] = w->link[0];
               w->link[0] = x;
               y->link[0] = w->link[1];
               w->link[1] = y;
               if (w->balance == -1)
               {
                  x->balance = 0;
                  y->balance = +1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == +1| */
               {
                  x->balance = -1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[1] != NULL)
               {
                  x->link[1]->parent = x;
               }
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* Single right rotation. */
               y->link[0] = x->link[1];
               x->link[1] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  x->balance = +1;
                  y->balance = -1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
   }

}
651
652 PNODE _cdecl avl_get_first(PNODE root)
653 {
654 PNODE p;
655 if (root == NULL)
656 {
657 return NULL;
658 }
659 p = root;
660 while (p->link[0])
661 {
662 p = p->link[0];
663 }
664 return p;
665 }
666
667 PNODE avl_get_next(PNODE root, PNODE p)
668 {
669 PNODE q;
670 if (p->link[1])
671 {
672 p = p->link[1];
673 while(p->link[0])
674 {
675 p = p->link[0];
676 }
677 return p;
678 }
679 else
680 {
681 q = p->parent;
682 while (q && q->link[1] == p)
683 {
684 p = q;
685 q = q->parent;
686 }
687 if (q == NULL)
688 {
689 return NULL;
690 }
691 else
692 {
693 return q;
694 }
695 }
696 }
697
698 PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
699 {
700 PNODE p;
701 PNODE prev = NULL;
702 int cmp;
703
704 for (p = root; p != NULL;)
705 {
706 cmp = compare((PVOID)&size, p);
707 if (cmp < 0)
708 {
709 prev = p;
710 p = p->link[0];
711 }
712 else if (cmp > 0)
713 {
714 p = p->link[1];
715 }
716 else
717 {
718 while (p->link[0])
719 {
720 cmp = compare((PVOID)&size, p->link[0]);
721 if (cmp != 0)
722 {
723 break;
724 }
725 p = p->link[0];
726 }
727 return p;
728 }
729 }
730 return prev;
731 }
732
733 /* non paged pool functions ************************************************/
734
735 #ifdef TAG_STATISTICS_TRACKING
736 VOID
737 MiRemoveFromTagHashTable(BLOCK_HDR* block)
738 /*
739 * Remove a block from the tag hash table
740 */
741 {
742 if (block->Used.Tag == 0)
743 {
744 return;
745 }
746
747 RemoveEntryList(&block->Used.TagListEntry);
748 }
749
750 VOID
751 MiAddToTagHashTable(BLOCK_HDR* block)
752 /*
753 * Add a block to the tag hash table
754 */
755 {
756 ULONG hash;
757
758 if (block->Used.Tag == 0)
759 {
760 return;
761 }
762
763 hash = block->Used.Tag % TAG_HASH_TABLE_SIZE;
764
765 InsertHeadList(&tag_hash_table[hash], &block->Used.TagListEntry);
766 }
767 #endif /* TAG_STATISTICS_TRACKING */
768
769 #if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
770 VOID STATIC
771 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
772 {
773 CHAR c1, c2, c3, c4;
774
775 c1 = (CHAR)((CurrentTag >> 24) & 0xFF);
776 c2 = (CHAR)((CurrentTag >> 16) & 0xFF);
777 c3 = (CHAR)((CurrentTag >> 8) & 0xFF);
778 c4 = (CHAR)(CurrentTag & 0xFF);
779
780 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
781 {
782 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
783 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
784 CurrentSize, CurrentSize / CurrentNrBlocks);
785 }
786 else
787 {
788 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
789 CurrentTag, CurrentNrBlocks, CurrentSize,
790 CurrentSize / CurrentNrBlocks);
791 }
792 }
793 #endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS); */
794
/*
 * Dump per-tag usage statistics for the nonpaged pool.  If |NewOnly|,
 * only blocks not yet marked Dumped are counted (and are marked).
 * NOTE(review): no pool lock is taken here — presumably callers run at
 * a safe time (e.g. debugger); confirm before relying on it.
 */
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
   ULONG i;
   BLOCK_HDR* current;
   ULONG CurrentTag;
   ULONG CurrentNrBlocks = 0;
   ULONG CurrentSize = 0;
   ULONG TotalBlocks;
   ULONG TotalSize;
   ULONG Size;
   LIST_ENTRY tmpListHead;
   PLIST_ENTRY current_entry;

   DbgPrint("******* Dumping non paging pool stats ******\n");
   TotalBlocks = 0;
   TotalSize = 0;
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      /* Blocks are moved here tag-by-tag while being counted, then the
       * whole list is spliced back into the bucket afterwards. */
      InitializeListHead(&tmpListHead);

      while (!IsListEmpty(&tag_hash_table[i]))
      {
         CurrentTag = 0;

         /* One pass per distinct tag: the first block's tag becomes
          * CurrentTag and all blocks with that tag are accumulated. */
         current_entry = tag_hash_table[i].Flink;
         while (current_entry != &tag_hash_table[i])
         {
            current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.TagListEntry);
            current_entry = current_entry->Flink;
            if (CurrentTag == 0)
            {
               CurrentTag = current->Used.Tag;
               CurrentNrBlocks = 0;
               CurrentSize = 0;
            }
            if (current->Used.Tag == CurrentTag)
            {
               RemoveEntryList(&current->Used.TagListEntry);
               InsertHeadList(&tmpListHead, &current->Used.TagListEntry);
               if (!NewOnly || !current->Used.Dumped)
               {
                  CurrentNrBlocks++;
                  TotalBlocks++;
                  CurrentSize += current->Size;
                  TotalSize += current->Size;
                  current->Used.Dumped = TRUE;
               }
            }
         }
         if (CurrentTag != 0 && CurrentNrBlocks != 0)
         {
            MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
         }
      }
      /* Splice the counted blocks back into the (now empty) bucket. */
      if (!IsListEmpty(&tmpListHead))
      {
         tag_hash_table[i].Flink = tmpListHead.Flink;
         tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
         tag_hash_table[i].Blink = tmpListHead.Blink;
         tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
      }
   }
   if (TotalBlocks != 0)
   {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
   }
   else
   {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
   }
   /* Free pool bytes excluding the part of the pool never committed. */
   Size = EiFreeNonPagedPool - (MiNonPagedPoolLength - MiNonPagedPoolNrOfPages * PAGE_SIZE);
   DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
            EiNrFreeBlocks, Size, EiNrFreeBlocks ? Size / EiNrFreeBlocks : 0);
   DbgPrint("***************** Dump Complete ***************\n");
#endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
}
875
/*
 * Dump every block on the used list (size, tag, allocator address)
 * under the pool spin lock.  If |NewOnly|, blocks already marked
 * Dumped are skipped; all printed blocks are marked Dumped.
 */
VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
#ifndef WHOLE_PAGE_ALLOCATIONS
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   KIRQL oldIrql;

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   DbgPrint("******* Dumping non paging pool contents ******\n");
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
      if (!NewOnly || !current->Used.Dumped)
      {
         CHAR c1, c2, c3, c4;

         /* Decode the tag bytes; print them only if all are printable. */
         c1 = (CHAR)((current->Used.Tag >> 24) & 0xFF);
         c2 = (CHAR)((current->Used.Tag >> 16) & 0xFF);
         c3 = (CHAR)((current->Used.Tag >> 8) & 0xFF);
         c4 = (CHAR)(current->Used.Tag & 0xFF);

         if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
         {
            DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                     current->Size, current->Used.Tag, c4, c3, c2, c1,
                     current->Used.Caller);
         }
         else
         {
            DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                     current->Size, current->Used.Tag, current->Used.Caller);
         }
         current->Used.Dumped = TRUE;
      }
      current_entry = current_entry->Flink;
   }
   DbgPrint("***************** Dump Complete ***************\n");
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#endif /* not WHOLE_PAGE_ALLOCATIONS */
}
919
920 #ifndef WHOLE_PAGE_ALLOCATIONS
921
922 #ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 * Bugchecks on a bad magic, a block outside the pool range, or more
 * blocks than EiNrFreeBlocks claims.
 */
{
   BLOCK_HDR* current;
   unsigned int blocks_seen=0;
   PNODE p;

   p = avl_get_first(FreeBlockListRoot);

   while(p)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
      base_addr = (PVOID)current;

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* NOTE(review): arithmetic on PVOID relies on the GCC void*
       * extension (sizeof(void) == 1). */
      if (base_addr < MiNonPagedPoolStart ||
          base_addr + BLOCK_HDR_SIZE + current->Size > MiNonPagedPoolStart + MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  MiNonPagedPoolStart+MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      if (blocks_seen > EiNrFreeBlocks)
      {
         DbgPrint("Too many blocks on free list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }
}
966
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * Bugchecks on a bad magic, a block outside the pool range, a count
 * exceeding EiNrUsedBlocks, or a broken Flink/Blink pairing.
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
      base_addr = (PVOID)current;

      if (current->Magic != BLOCK_HDR_USED_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if (base_addr < MiNonPagedPoolStart ||
          (base_addr+BLOCK_HDR_SIZE+current->Size) >
          MiNonPagedPoolStart+MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  MiNonPagedPoolStart+MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
      {
         DbgPrint("Too many blocks on used list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* The next entry's back pointer must point at this entry. */
      if (current->Used.ListEntry.Flink != &UsedBlockListHead &&
          current->Used.ListEntry.Flink->Blink != &current->Used.ListEntry)
      {
         DbgPrint("%s:%d:Break in list (current %x next %x "
                  "current->next->previous %x)\n",
                  __FILE__,__LINE__,current, current->Used.ListEntry.Flink,
                  current->Used.ListEntry.Flink->Blink);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }
}
1019
static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if duplicates are found
 * Scans both the free tree and the used list for any block whose
 * [header..header+size] range intersects blk's range.
 */
{
   char* base = (char*)blk;
   char* last = ((char*)blk) + BLOCK_HDR_SIZE + blk->Size;
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   PNODE p;

   p = avl_get_first(FreeBlockListRoot);

   while (p)
   {
      current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);

      if (current->Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* Another block starting inside blk's range. */
      if ( (char*)current > base && (char*)current < last )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* A block starting before blk but extending into it. */
      if ( (char*)current < base &&
            ((char*)current + current->Size + BLOCK_HDR_SIZE)
            > base )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }

   /* Repeat the same overlap checks against the used list. */
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);

      if ( (char*)current > base && (char*)current < last )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if ( (char*)current < base &&
            ((char*)current + current->Size + BLOCK_HDR_SIZE)
            > base )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }

}
1084
1085 static void validate_kernel_pool(void)
1086 /*
1087 * FUNCTION: Checks the integrity of the kernel memory heap
1088 */
1089 {
1090 BLOCK_HDR* current;
1091 PLIST_ENTRY current_entry;
1092 PNODE p;
1093
1094 validate_free_list();
1095 validate_used_list();
1096
1097 p = avl_get_first(FreeBlockListRoot);
1098 while (p)
1099 {
1100 current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1101 check_duplicates(current);
1102 p = avl_get_next(FreeBlockListRoot, p);
1103 }
1104 current_entry = UsedBlockListHead.Flink;
1105 while (current_entry != &UsedBlockListHead)
1106 {
1107 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
1108 check_duplicates(current);
1109 current_entry = current_entry->Flink;
1110 }
1111 }
1112 #endif
1113
#if 0
/* Disabled, incomplete stub: intended to return whole pages spanned by
 * a free block to the system; currently only computes the range and
 * bails out when the block does not cover a full page. */
STATIC VOID
free_pages(BLOCK_HDR* blk)
{
   ULONG start;
   ULONG end;
   ULONG i;

   start = (ULONG)blk;
   end = (ULONG)blk + BLOCK_HDR_SIZE + blk->Size;

   /*
    * If the block doesn't contain a whole page then there is nothing to do
    */
   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
   {
      return;
   }
}
#endif
1134
/* Unlink a block from the used list and update the usage counters. */
static void remove_from_used_list(BLOCK_HDR* current)
{
   RemoveEntryList(&current->Used.ListEntry);
   EiUsedNonPagedPool -= current->Size;
   EiNrUsedBlocks--;
}
1141
/* Remove a block from the free-block AVL tree and update the counters. */
static void remove_from_free_list(BLOCK_HDR* current)
{
   DPRINT("remove_from_free_list %d\n", current->Size);

   avl_remove(&FreeBlockListRoot, &current->Free.Node, compare_node);

   EiFreeNonPagedPool -= current->Size;
   EiNrFreeBlocks--;
   DPRINT("remove_from_free_list done\n");
#ifdef DUMP_AVL

   DumpFreeBlockTree();
#endif
}
1156
static void
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 * Coalesces with the physically adjacent previous and/or next block
 * when they are free, then inserts the (possibly merged) block into
 * the free-block AVL tree.
 */
{
   BLOCK_HDR* current;
   BOOL UpdatePrevPtr = FALSE;

   DPRINT("add_to_free_list (%d)\n", blk->Size);

   EiNrFreeBlocks++;

   /* Merge with the preceding block if it is free: absorb blk into it. */
   current = blk->previous;
   if (current && current->Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      current->Size = current->Size + BLOCK_HDR_SIZE + blk->Size;
      /* Marked used only temporarily; set back to free below after all
       * merging is complete. */
      current->Magic = BLOCK_HDR_USED_MAGIC;
      memset(blk, 0xcc, BLOCK_HDR_SIZE);  /* poison the absorbed header */
      blk = current;
      UpdatePrevPtr = TRUE;
   }

   /* Merge with the following block if it is free. */
   current = (BLOCK_HDR*)((char*)blk + BLOCK_HDR_SIZE + blk->Size);
   if ((char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength &&
         current->Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      blk->Size += BLOCK_HDR_SIZE + current->Size;
      memset(current, 0xcc, BLOCK_HDR_SIZE);  /* poison the absorbed header */
      UpdatePrevPtr = TRUE;
      current = (BLOCK_HDR*)((char*)blk + BLOCK_HDR_SIZE + blk->Size);
   }
   /* After a merge, the next physical block's back pointer must be fixed. */
   if (UpdatePrevPtr &&
         (char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
   {
      current->previous = blk;
   }
   DPRINT("%d\n", blk->Size);
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   EiFreeNonPagedPool += blk->Size;
   avl_insert(&FreeBlockListRoot, &blk->Free.Node, compare_node);

   DPRINT("add_to_free_list done\n");
#ifdef DUMP_AVL

   DumpFreeBlockTree();
#endif
}
1207
static void add_to_used_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the used list (internal)
 * Links the block at the head of the used list and updates counters.
 */
{
   InsertHeadList(&UsedBlockListHead, &blk->Used.ListEntry);
   EiUsedNonPagedPool += blk->Size;
   EiNrUsedBlocks++;
}
1217
inline static void* block_to_address(BLOCK_HDR* blk)
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal) — the payload starts BLOCK_HDR_SIZE bytes after
 * the header.
 */
{
   return ( (void *) ((char*)blk + BLOCK_HDR_SIZE));
}
1226
/* Inverse of block_to_address: recover the header from a payload pointer. */
inline static BLOCK_HDR* address_to_block(void* addr)
{
   return (BLOCK_HDR *)
          ( ((char*)addr) - BLOCK_HDR_SIZE );
}
1232
static BOOLEAN
grow_block(BLOCK_HDR* blk, PVOID end)
/*
 * Commit physical pages for any not-yet-committed pool pages in the
 * range covering blk's payload up to |end|, in runs of at most 32
 * pages (the size of the local Page[] array).  The commit state is
 * tracked one bit per page in MiNonPagedPoolAllocMap.
 * Returns TRUE on success; FALSE if page allocation or mapping fails
 * (already-allocated pages of the failed run are released, but pages
 * committed by earlier runs are kept).
 */
{
   NTSTATUS Status;
   PFN_TYPE Page[32];
   ULONG_PTR StartIndex, EndIndex;
   ULONG i, j, k;

   /* First and one-past-last pool page index touched by the range. */
   StartIndex = (ULONG_PTR)(PAGE_ROUND_UP((ULONG_PTR)blk + BLOCK_HDR_SIZE - (ULONG_PTR)MiNonPagedPoolStart)) / PAGE_SIZE;
   EndIndex = ((ULONG_PTR)PAGE_ROUND_UP(end) - (ULONG_PTR)MiNonPagedPoolStart) / PAGE_SIZE;


   for (i = StartIndex; i < EndIndex; i++)
   {
      if (!(MiNonPagedPoolAllocMap[i / 32] & (1 << (i % 32))))
      {
         /* Found an uncommitted page; extend the run up to the next
          * committed page or the 32-page batch limit. */
         for (j = i + 1; j < EndIndex && j - i < 32; j++)
         {
            if (MiNonPagedPoolAllocMap[j / 32] & (1 << (j % 32)))
            {
               break;
            }
         }
         /* Allocate one physical page per page in the run. */
         for (k = 0; k < j - i; k++)
         {
            Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page[k]);
            if (!NT_SUCCESS(Status))
            {
               /* Reusing |i| here is safe: we return immediately. */
               for (i = 0; i < k; i++)
               {
                  MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
               }
               return FALSE;
            }
         }
         /* Map the whole run in one call (k == j - i after the loop). */
         Status = MmCreateVirtualMapping(NULL,
                                         (PVOID)((ULONG_PTR)MiNonPagedPoolStart + i * PAGE_SIZE),
                                         PAGE_READWRITE|PAGE_SYSTEM,
                                         Page,
                                         k);
         if (!NT_SUCCESS(Status))
         {
            /* Reusing |i| here is safe: we return immediately. */
            for (i = 0; i < k; i++)
            {
               MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
            }
            return FALSE;
         }
         /* Mark the run committed in the bitmap and skip past it. */
         for (j = i; j < k + i; j++)
         {
            MiNonPagedPoolAllocMap[j / 32] |= (1 << (j % 32));
         }
         MiNonPagedPoolNrOfPages += k;
         i += k - 1;
      }
   }
   return TRUE;
}
1291
1292 static BLOCK_HDR* get_block(unsigned int size, unsigned long alignment)
1293 {
1294 BLOCK_HDR *blk, *current, *previous = NULL, *next = NULL, *best = NULL;
1295 ULONG previous_size = 0, current_size, next_size = 0, new_size;
1296 PVOID end;
1297 PVOID addr, aligned_addr;
1298 PNODE p;
1299
1300 DPRINT("get_block %d\n", size);
1301
1302 if (alignment == 0)
1303 {
1304 p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
1305 if (p)
1306 {
1307 best = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1308 addr = block_to_address(best);
1309 }
1310 }
1311 else
1312 {
1313 p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
1314
1315 while(p)
1316 {
1317 current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1318 addr = block_to_address(current);
1319 /* calculate first aligned address available within this block */
1320 aligned_addr = MM_ROUND_UP(addr, alignment);
1321 /* check to see if this address is already aligned */
1322 if (addr == aligned_addr)
1323 {
1324 if (current->Size >= size &&
1325 (best == NULL || current->Size < best->Size))
1326 {
1327 best = current;
1328 }
1329 }
1330 else
1331 {
1332 /* make sure there's enough room to make a free block by the space skipped
1333 * from alignment. If not, calculate forward to the next alignment
1334 * and see if we allocate there...
1335 */
1336 new_size = (ULONG_PTR)aligned_addr - (ULONG_PTR)addr + size;
1337 if ((ULONG_PTR)aligned_addr - (ULONG_PTR)addr < BLOCK_HDR_SIZE)
1338 {
1339 /* not enough room for a free block header, add some more bytes */
1340 aligned_addr = MM_ROUND_UP(block_to_address((BLOCK_HDR*)((char*)current + BLOCK_HDR_SIZE)), alignment);
1341 new_size = (ULONG_PTR)aligned_addr - (ULONG_PTR)addr + size;
1342 }
1343 if (current->Size >= new_size &&
1344 (best == NULL || current->Size < best->Size))
1345 {
1346 best = current;
1347 }
1348 }
1349 if (best && current->Size >= size + alignment + 2 * BLOCK_HDR_SIZE)
1350 {
1351 break;
1352 }
1353 p = avl_get_next(FreeBlockListRoot, p);
1354
1355 }
1356 }
1357
1358 /*
1359 * We didn't find anything suitable at all.
1360 */
1361 if (best == NULL)
1362 {
1363 return NULL;
1364 }
1365
1366 current = best;
1367 current_size = current->Size;
1368
1369 if (alignment > 0)
1370 {
1371 addr = block_to_address(current);
1372 aligned_addr = MM_ROUND_UP(addr, alignment);
1373 if (addr != aligned_addr)
1374 {
1375 blk = address_to_block(aligned_addr);
1376 if ((char*)blk < (char*)current + BLOCK_HDR_SIZE)
1377 {
1378 aligned_addr = MM_ROUND_UP(block_to_address((BLOCK_HDR*)((char*)current + BLOCK_HDR_SIZE)), alignment);
1379 blk = address_to_block(aligned_addr);
1380 }
1381 /*
1382 * if size-aligned, break off the preceding bytes into their own block...
1383 */
1384 previous = current;
1385 previous_size = (ULONG_PTR)blk - (ULONG_PTR)previous - BLOCK_HDR_SIZE;
1386 current = blk;
1387 current_size -= ((ULONG_PTR)current - (ULONG_PTR)previous);
1388 }
1389 }
1390
1391 end = (char*)current + BLOCK_HDR_SIZE + size;
1392
1393 if (current_size >= size + BLOCK_HDR_SIZE + MM_POOL_ALIGNMENT)
1394 {
1395 /* create a new free block after our block, if the memory size is >= 4 byte for this block */
1396 next = (BLOCK_HDR*)((ULONG_PTR)current + size + BLOCK_HDR_SIZE);
1397 next_size = current_size - size - BLOCK_HDR_SIZE;
1398 current_size = size;
1399 end = (char*)next + BLOCK_HDR_SIZE;
1400 }
1401
1402 if (previous)
1403 {
1404 remove_from_free_list(previous);
1405 if (!grow_block(previous, end))
1406 {
1407 add_to_free_list(previous);
1408 return NULL;
1409 }
1410 memset(current, 0, BLOCK_HDR_SIZE);
1411 current->Size = current_size;
1412 current->Magic = BLOCK_HDR_USED_MAGIC;
1413 current->previous = previous;
1414 previous->Size = previous_size;
1415 if (next == NULL)
1416 {
1417 blk = (BLOCK_HDR*)((char*)current + BLOCK_HDR_SIZE + current->Size);
1418 if ((char*)blk < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
1419 {
1420 blk->previous = current;
1421 }
1422 }
1423
1424 add_to_free_list(previous);
1425 }
1426 else
1427 {
1428 remove_from_free_list(current);
1429
1430 if (!grow_block(current, end))
1431 {
1432 add_to_free_list(current);
1433 return NULL;
1434 }
1435
1436 current->Magic = BLOCK_HDR_USED_MAGIC;
1437 if (next)
1438 {
1439 current->Size = current_size;
1440 }
1441 }
1442
1443 if (next)
1444 {
1445 memset(next, 0, BLOCK_HDR_SIZE);
1446
1447 next->Size = next_size;
1448 next->Magic = BLOCK_HDR_FREE_MAGIC;
1449 next->previous = current;
1450 blk = (BLOCK_HDR*)((char*)next + BLOCK_HDR_SIZE + next->Size);
1451 if ((char*)blk < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
1452 {
1453 blk->previous = next;
1454 }
1455 add_to_free_list(next);
1456 }
1457
1458 add_to_used_list(current);
1459 VALIDATE_POOL;
1460 return current;
1461 }
1462
1463 #endif /* not WHOLE_PAGE_ALLOCATIONS */
1464
1465 VOID STDCALL ExFreeNonPagedPool (PVOID block)
1466 /*
1467 * FUNCTION: Releases previously allocated memory
1468 * ARGUMENTS:
1469 * block = block to free
1470 */
1471 {
1472 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
1473 KIRQL oldIrql;
1474
1475 if (block == NULL)
1476 {
1477 return;
1478 }
1479
1480 DPRINT("freeing block %x\n",block);
1481
1482 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
1483 ((PULONG)&block)[-1]);
1484
1485 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1486
1487 ExFreeWholePageBlock(block);
1488 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1489
1490 #else /* not WHOLE_PAGE_ALLOCATIONS */
1491
1492 BLOCK_HDR* blk=address_to_block(block);
1493 KIRQL oldIrql;
1494
1495 if (block == NULL)
1496 {
1497 return;
1498 }
1499
1500 DPRINT("freeing block %x\n",blk);
1501
1502 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->Size,
1503 ((PULONG)&block)[-1]);
1504
1505 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1506
1507 VALIDATE_POOL;
1508
1509 if (blk->Magic != BLOCK_HDR_USED_MAGIC)
1510 {
1511 if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
1512 {
1513 DbgPrint("ExFreePool of already freed address %x\n", block);
1514 }
1515 else
1516 {
1517 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1518 block, blk->Magic);
1519 }
1520 KEBUGCHECK(0);
1521 return;
1522 }
1523
1524 memset(block, 0xcc, blk->Size);
1525 #ifdef TAG_STATISTICS_TRACKING
1526
1527 MiRemoveFromTagHashTable(blk);
1528 #endif
1529
1530 remove_from_used_list(blk);
1531 blk->Magic = BLOCK_HDR_FREE_MAGIC;
1532 add_to_free_list(blk);
1533 VALIDATE_POOL;
1534 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1535
1536 #endif /* WHOLE_PAGE_ALLOCATIONS */
1537 }
1538
/*
 * FUNCTION: Allocates and tags a block from the nonpaged pool.
 * ARGUMENTS:
 *       Type   = pool type; the two cache-aligned variants request
 *                cache-line alignment for sub-page allocations
 *       Size   = number of bytes to allocate (0 returns NULL by design)
 *       Tag    = tag value stored in the block header for leak tracking
 *       Caller = caller's address, stored for debugging
 * RETURNS: Pointer to the usable area of the block, or NULL on failure.
 * NOTE(review): ASSERT_IRQL(DISPATCH_LEVEL) is only present in the
 * WHOLE_PAGE_ALLOCATIONS build, not in the default path — confirm whether
 * this asymmetry is intentional.
 */
PVOID STDCALL
ExAllocateNonPagedPoolWithTag(POOL_TYPE Type, ULONG Size, ULONG Tag, PVOID Caller)
{
#ifdef WHOLE_PAGE_ALLOCATIONS
   PVOID block;
   KIRQL oldIrql;

   ASSERT_IRQL(DISPATCH_LEVEL);

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size,Caller);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   /*
    * accomodate this useful idiom
    */
   if (Size == 0)
   {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
   }

   block = ExAllocateWholePageBlock(Size);
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   if (NULL == block)
   {
      DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
              Size );
   }
   return(block);

#else /* not WHOLE_PAGE_ALLOCATIONS */

   PVOID block;
   BLOCK_HDR* best = NULL;
   KIRQL oldIrql;
   ULONG alignment;

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size,Caller);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   VALIDATE_POOL;

#if 0
   /* after some allocations print the npaged pool stats */
#ifdef TAG_STATISTICS_TRACKING

   {
      static ULONG counter = 0;
      if (counter++ % 100000 == 0)
      {
         MiDebugDumpNonPagedPoolStats(FALSE);
      }
   }
#endif
#endif
   /*
    * accomodate this useful idiom
    */
   if (Size == 0)
   {
      POOL_TRACE("= NULL\n");
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      return(NULL);
   }
   /* Make the size dword alligned, this makes the block dword alligned */
   Size = ROUND_UP(Size, MM_POOL_ALIGNMENT);

   /* Pick the extra alignment the block must satisfy: page-sized requests
    * are page-aligned, cache-aligned pool types get cache-line alignment,
    * everything else needs nothing beyond MM_POOL_ALIGNMENT (0 = none). */
   if (Size >= PAGE_SIZE)
   {
      alignment = PAGE_SIZE;
   }
   else if (Type == NonPagedPoolCacheAligned ||
            Type == NonPagedPoolCacheAlignedMustS)
   {
      alignment = MM_CACHE_LINE_SIZE;
   }
   else
   {
      alignment = 0;
   }

   best = get_block(Size, alignment);
   if (best == NULL)
   {
      KeReleaseSpinLock(&MmNpoolLock, oldIrql);
      DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
              Size );
      return NULL;
   }
   /* Fill in the tag/caller bookkeeping on the freshly used block */
   best->Used.Tag = Tag;
   best->Used.Caller = Caller;
   best->Used.Dumped = FALSE;
   best->Used.TagListEntry.Flink = best->Used.TagListEntry.Blink = NULL;
#ifdef TAG_STATISTICS_TRACKING

   MiAddToTagHashTable(best);
#endif

   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   block = block_to_address(best);
   /*   RtlZeroMemory(block, Size);*/
   return(block);
#endif /* WHOLE_PAGE_ALLOCATIONS */
}
1648
1649 #ifdef WHOLE_PAGE_ALLOCATIONS
1650
1651 PVOID STDCALL
1652 ExAllocateWholePageBlock(ULONG Size)
1653 {
1654 PVOID Address;
1655 PFN_TYPE Page;
1656 ULONG i;
1657 ULONG NrPages;
1658 ULONG Base;
1659
1660 NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;
1661
1662 Base = RtlFindClearBitsAndSet(&NonPagedPoolAllocMap, NrPages + 1, NonPagedPoolAllocMapHint);
1663 if (Base == 0xffffffff)
1664 {
1665 DbgPrint("Out of non paged pool space.\n");
1666 KEBUGCHECK(0);
1667 }
1668 if (NonPagedPoolAllocMapHint == Base)
1669 {
1670 NonPagedPoolAllocMapHint += (NrPages + 1);
1671 }
1672
1673 Address = MiNonPagedPoolStart + Base * PAGE_SIZE;
1674
1675 for (i = 0; i < NrPages; i++)
1676 {
1677 Page = MmAllocPage(MC_NPPOOL, 0);
1678 if (Page == 0)
1679 {
1680 KEBUGCHECK(0);
1681 }
1682 MmCreateVirtualMapping(NULL,
1683 Address + (i * PAGE_SIZE),
1684 PAGE_READWRITE | PAGE_SYSTEM,
1685 &Page,
1686 TRUE);
1687 }
1688
1689 MiCurrentNonPagedPoolLength = max(MiCurrentNonPagedPoolLength, (Base + NrPages) * PAGE_SIZE);
1690 Size = (Size + 7) & ~7;
1691 Address = ((PVOID)((PUCHAR)Address + (NrPages * PAGE_SIZE) - Size));
1692
1693 DPRINT("WPALLOC: %x (%d)\n", Address, Size);
1694
1695 return Address;
1696 }
1697
/*
 * FUNCTION: Frees a block returned by ExAllocateWholePageBlock.
 * ARGUMENTS:
 *       Addr = address previously returned by ExAllocateWholePageBlock
 * REMARKS: Unmaps and releases every present page starting at Addr's page
 *          and clears the matching bitmap bits. Bugchecks if Addr lies
 *          outside the pool region.
 * NOTE(review): the allocator reserves NrPages + 1 bitmap bits (the last
 * one is the unmapped guard page), but this loop only clears bits for
 * pages that are actually mapped — the guard bit appears to stay set;
 * confirm whether that is intentional.
 */
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr)
{
   ULONG Base;

   if (Addr < MiNonPagedPoolStart ||
         Addr >= (MiNonPagedPoolStart + MiCurrentNonPagedPoolLength))
   {
      DbgPrint("Block %x found outside pool area\n", Addr);
      KEBUGCHECK(0);
   }
   Base = (Addr - MiNonPagedPoolStart) / PAGE_SIZE;
   /* Move the search hint back so the freed range is found quickly */
   NonPagedPoolAllocMapHint = min(NonPagedPoolAllocMapHint, Base);
   /* Walk forward page by page until the (unmapped) guard page stops us */
   while (MmIsPagePresent(NULL, Addr))
   {
      MmDeleteVirtualMapping(NULL,
                             Addr,
                             TRUE,
                             NULL,
                             NULL);
      RtlClearBits(&NonPagedPoolAllocMap, Base, 1);
      Base++;
      Addr += PAGE_SIZE;
   }
}
1723
1724 #endif /* WHOLE_PAGE_ALLOCATIONS */
1725
/* Whole Page Allocations note:
 *
 * The pool region must provide enough pages for all of the following:
 *
 * 1) the allocation bitmap buffer
 * 2) the block header
 * 3) the actual data pages
 *
 */
/*
 * FUNCTION: One-time initialization of the nonpaged pool.
 * REMARKS: Initializes the pool lock and bookkeeping lists, then maps the
 * initial pages. In the WHOLE_PAGE_ALLOCATIONS build this sets up the
 * page-granular allocation bitmap; in the default build it creates the
 * first used block (holding the page bitmap) followed by one large free
 * block covering the rest of the pool address range.
 */
VOID INIT_FUNCTION
MiInitializeNonPagedPool(VOID)
{
   NTSTATUS Status;
   PFN_TYPE Page;
   ULONG i;
   PVOID Address;
#ifdef WHOLE_PAGE_ALLOCATIONS
#else

   BLOCK_HDR* blk;
#endif
#ifdef TAG_STATISTICS_TRACKING

   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tag_hash_table[i]);
   }
#endif
   KeInitializeSpinLock(&MmNpoolLock);
   InitializeListHead(&UsedBlockListHead);
   InitializeListHead(&AddressListHead);
   FreeBlockListRoot = NULL;
#ifdef WHOLE_PAGE_ALLOCATIONS

   /* The bitmap itself lives at the start of the pool; the hint starts
    * just past the pages the bitmap buffer occupies. */
   NonPagedPoolAllocMapHint = PAGE_ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE / 8) / PAGE_SIZE; /* Pages of bitmap buffer */
   MiCurrentNonPagedPoolLength = NonPagedPoolAllocMapHint * PAGE_SIZE;
   Address = MiNonPagedPoolStart;
   for (i = 0; i < NonPagedPoolAllocMapHint; i++)
   {
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to allocate a page\n");
         KEBUGCHECK(0);
      }
      Status = MmCreateVirtualMapping(NULL,
                                      Address,
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      &Page,
                                      1);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
      Address += PAGE_SIZE;
   }
   RtlInitializeBitMap(&NonPagedPoolAllocMap, MiNonPagedPoolStart,
                       MiNonPagedPoolLength / PAGE_SIZE);
   RtlClearAllBits(&NonPagedPoolAllocMap);
   /* Mark the bitmap buffer's own pages as allocated */
   RtlSetBits(&NonPagedPoolAllocMap, 0, NonPagedPoolAllocMapHint);
#else

   /* The page bitmap is stored as the payload of the pool's first block */
   MiNonPagedPoolAllocMap = block_to_address((BLOCK_HDR*)MiNonPagedPoolStart);
   MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + 2 * BLOCK_HDR_SIZE);
   MiNonPagedPoolNrOfPages /= PAGE_SIZE;
   Address = MiNonPagedPoolStart;
   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to allocate a page\n");
         KEBUGCHECK(0);
      }
      Status = MmCreateVirtualMapping(NULL,
                                      Address,
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      &Page,
                                      1);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
      /* Mark this pool page as committed in the bitmap (one bit per page) */
      MiNonPagedPoolAllocMap[i / 32] |= (1 << (i % 32));
      Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
   }
   /* the first block contains the non paged pool bitmap */
   blk = (BLOCK_HDR*)MiNonPagedPoolStart;
   memset(blk, 0, BLOCK_HDR_SIZE);
   blk->Magic = BLOCK_HDR_USED_MAGIC;
   blk->Size = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8;
   blk->previous = NULL;
   blk->Used.Tag = 0xffffffff;
   blk->Used.Caller = 0;
   blk->Used.Dumped = FALSE;
   add_to_used_list(blk);
#ifdef TAG_STATISTICS_TRACKING

   MiAddToTagHashTable(blk);
#endif
   /* the second block is the first free block */
   blk = (BLOCK_HDR*)((char*)blk + BLOCK_HDR_SIZE + blk->Size);
   memset(blk, 0, BLOCK_HDR_SIZE);
   /* Poison the committed-but-free area; 0x0cc truncates to the usual
    * 0xcc fill byte when memset converts it to unsigned char. */
   memset((char*)blk + BLOCK_HDR_SIZE, 0x0cc, MiNonPagedPoolNrOfPages * PAGE_SIZE - ((ULONG_PTR)blk + BLOCK_HDR_SIZE - (ULONG_PTR)MiNonPagedPoolStart));
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   blk->Size = MiNonPagedPoolLength - ((ULONG_PTR)blk + BLOCK_HDR_SIZE - (ULONG_PTR)MiNonPagedPoolStart);
   blk->previous = (BLOCK_HDR*)MiNonPagedPoolStart;
   add_to_free_list(blk);
#endif
}
1838
1839 PVOID
1840 STDCALL
1841 MiAllocateSpecialPool (IN POOL_TYPE PoolType,
1842 IN SIZE_T NumberOfBytes,
1843 IN ULONG Tag,
1844 IN ULONG Underrun
1845 )
1846 {
1847 /* FIXME: Special Pools not Supported */
1848 DbgPrint("Special Pools not supported\n");
1849 return 0;
1850 }
1851
1852 /* EOF */