Add missing processor architecture cases
[reactos.git] / reactos / ntoskrnl / mm / npool.c
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 *
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 * Iwan Fatahi (i_fatahi@hotmail.com)
10 * Robert Bergkvist (fragdance@hotmail.com)
11 * Hartmut Birr
12 */
13
14 /* INCLUDES ****************************************************************/
15
16 #include <ntoskrnl.h>
17 #define NDEBUG
18 #include <internal/debug.h>
19
20 #if defined (ALLOC_PRAGMA)
21 #pragma alloc_text(INIT, MiInitializeNonPagedPool)
22 #endif
23
24 #ifdef ENABLE_VALIDATE_POOL
25 #define VALIDATE_POOL validate_kernel_pool()
26 #else
27 #define VALIDATE_POOL
28 #endif
29
30 #if 0
31 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
32 #else
33 #if defined(__GNUC__)
34 #define POOL_TRACE(args...)
35 #else
36 #define POOL_TRACE
37 #endif /* __GNUC__ */
38 #endif
39
40 /* avl types ****************************************************************/
41
42 /* FIXME:
43 * This declarations should be moved into a separate header file.
44 */
45
/* A node in the AVL tree of free blocks.  link[0]/link[1] are the left and
 * right children; parent pointers are kept so the tree can be walked
 * without a stack. */
typedef struct _NODE
{
    struct _NODE* link[2];  /* link[0] = left child, link[1] = right child */
    struct _NODE* parent;   /* NULL for the root node */
    signed char balance;    /* AVL balance factor: height(right) - height(left) */
}
NODE, *PNODE;
53
54 /* TYPES *******************************************************************/
55
#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)

/*
 * fields present at the start of a block (this is for internal use only)
 */
typedef struct _HDR
{
    ULONG Magic;           /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
    ULONG Size;            /* total size of the block, header included */
    struct _HDR* previous; /* physically preceding block in the pool */
} HDR, *PHDR;

/* Header of an allocated (in-use) block. */
typedef struct _HDR_USED
{
    HDR hdr;
    LIST_ENTRY ListEntry;    /* entry in UsedBlockListHead */
    ULONG Tag;               /* pool tag supplied by the caller, 0 if none */
    PVOID Caller;            /* address of the allocator's caller */
    LIST_ENTRY TagListEntry; /* entry in the per-tag statistics hash table */
    BOOLEAN Dumped;          /* TRUE once reported by the debug dump routines */
} HDR_USED, *PHDR_USED;

/* Header of a free block; free blocks live in the AVL tree. */
typedef struct _HDR_FREE
{
    HDR hdr;
    NODE Node;               /* node in FreeBlockListRoot, ordered by size then address */
} HDR_FREE, *PHDR_FREE;

/* Header sizes rounded up to the pool alignment. */
#define HDR_FREE_SIZE ROUND_UP(sizeof(HDR_FREE), MM_POOL_ALIGNMENT)
#define HDR_USED_SIZE ROUND_UP(sizeof(HDR_USED), MM_POOL_ALIGNMENT)
87
88
89 /* GLOBALS *****************************************************************/
90
extern PVOID MiNonPagedPoolStart;   /* base address of the nonpaged pool area */
extern ULONG MiNonPagedPoolLength;  /* size in bytes of the nonpaged pool area */

/*
 * Head of the list of free blocks
 */
static PNODE FreeBlockListRoot = NULL;

/*
 * Head of the list of in use block
 */
static LIST_ENTRY UsedBlockListHead;

/* List head for pool address regions (see MiAllocNonPagedPoolRegion). */
static LIST_ENTRY AddressListHead;

/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;

/*
 * Lock that protects the non-paged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/* Total quota for Non Paged Pool */
ULONG MmTotalNonPagedPoolQuota = 0;

/*
 * Allocate a range of memory in the nonpaged pool
 */
PVOID
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

VOID
MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
/* Hash table of in-use blocks keyed by pool tag; statistics only. */
static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */

/* Bitmap of committed pages inside the pool area, one bit per page. */
static PULONG MiNonPagedPoolAllocMap;
/* Number of pages currently committed for the pool. */
static ULONG MiNonPagedPoolNrOfPages;
150
151 /* avl helper functions ****************************************************/
152
153 void DumpFreeBlockNode(PNODE p)
154 {
155 static int count = 0;
156 HDR_FREE* blk;
157
158 count++;
159
160 if (p)
161 {
162 DumpFreeBlockNode(p->link[0]);
163 blk = CONTAINING_RECORD(p, HDR_FREE, Node);
164 DbgPrint("%08x %8d (%d)\n", blk, blk->hdr.Size, count);
165 DumpFreeBlockNode(p->link[1]);
166 }
167
168 count--;
169 }
170 void DumpFreeBlockTree(void)
171 {
172 DbgPrint("--- Begin tree ------------------\n");
173 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, HDR_FREE, Node));
174 DumpFreeBlockNode(FreeBlockListRoot);
175 DbgPrint("--- End tree --------------------\n");
176 }
177
178 int compare_node(PNODE p1, PNODE p2)
179 {
180 HDR_FREE* blk1 = CONTAINING_RECORD(p1, HDR_FREE, Node);
181 HDR_FREE* blk2 = CONTAINING_RECORD(p2, HDR_FREE, Node);
182
183 if (blk1->hdr.Size == blk2->hdr.Size)
184 {
185 if (blk1 < blk2)
186 {
187 return -1;
188 }
189 if (blk1 > blk2)
190 {
191 return 1;
192 }
193 }
194 else
195 {
196 if (blk1->hdr.Size < blk2->hdr.Size)
197 {
198 return -1;
199 }
200 if (blk1->hdr.Size > blk2->hdr.Size)
201 {
202 return 1;
203 }
204 }
205 return 0;
206
207 }
208
209 int compare_value(PVOID value, PNODE p)
210 {
211 HDR_FREE* blk = CONTAINING_RECORD(p, HDR_FREE, Node);
212 ULONG v = *(PULONG)value;
213
214 if (v < blk->hdr.Size)
215 {
216 return -1;
217 }
218 if (v > blk->hdr.Size)
219 {
220 return 1;
221 }
222 return 0;
223 }
224
225 /* avl functions **********************************************************/
226
227 /* FIXME:
228 * The avl functions should be moved into a separate file.
229 */
230
231 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
232
/*
 * Insert node n into the AVL tree rooted at *root and rebalance.
 * compare() must implement a strict total order (see compare_node).
 * Based on libavl; parent pointers are maintained in addition to the
 * usual child links.
 */
void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
{
    PNODE y;  /* Top node to update balance factor, and parent. */
    PNODE p, q;  /* Iterator, and parent. */
    PNODE w;  /* New root of rebalanced subtree. */
    int dir = 0; /* Direction to descend. */

    n->link[0] = n->link[1] = n->parent = NULL;
    n->balance = 0;

    /* Walk down to the insertion point; y remembers the deepest node with
     * a non-zero balance factor (where rebalancing may become necessary). */
    y = *root;
    for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
    {
        dir = compare(n, p) > 0;
        if (p->balance != 0)
        {
            y = p;
        }
    }

    /* Link the new node below q, or make it the root. */
    n->parent = q;
    if (q != NULL)
    {
        q->link[dir] = n;
    }
    else
    {
        *root = n;
    }

    /* A brand new root needs no rebalancing. */
    if (*root == n)
    {
        return;
    }

    /* Update balance factors on the path from n up to y. */
    for (p = n; p != y; p = q)
    {
        q = p->parent;
        dir = q->link[0] != p;
        if (dir == 0)
        {
            q->balance--;
        }
        else
        {
            q->balance++;
        }
    }

    if (y->balance == -2)
    {
        /* Left-heavy: single right rotation or left-right double rotation. */
        PNODE x = y->link[0];
        if (x->balance == -1)
        {
            w = x;
            y->link[0] = x->link[1];
            x->link[1] = y;
            x->balance = y->balance = 0;
            x->parent = y->parent;
            y->parent = x;
            if (y->link[0] != NULL)
            {
                y->link[0]->parent = y;
            }
        }
        else
        {
            ASSERT(x->balance == +1);
            w = x->link[1];
            x->link[1] = w->link[0];
            w->link[0] = x;
            y->link[0] = w->link[1];
            w->link[1] = y;
            if (w->balance == -1)
            {
                x->balance = 0;
                y->balance = +1;
            }
            else if (w->balance == 0)
            {
                x->balance = y->balance = 0;
            }
            else /* |w->pavl_balance == +1| */
            {
                x->balance = -1;
                y->balance = 0;
            }
            w->balance = 0;
            w->parent = y->parent;
            x->parent = y->parent = w;
            if (x->link[1] != NULL)
            {
                x->link[1]->parent = x;
            }
            if (y->link[0] != NULL)
            {
                y->link[0]->parent = y;
            }
        }
    }
    else if (y->balance == +2)
    {
        /* Right-heavy: mirror image of the case above. */
        PNODE x = y->link[1];
        if (x->balance == +1)
        {
            w = x;
            y->link[1] = x->link[0];
            x->link[0] = y;
            x->balance = y->balance = 0;
            x->parent = y->parent;
            y->parent = x;
            if (y->link[1] != NULL)
            {
                y->link[1]->parent = y;
            }
        }
        else
        {
            ASSERT(x->balance == -1);
            w = x->link[0];
            x->link[0] = w->link[1];
            w->link[1] = x;
            y->link[1] = w->link[0];
            w->link[0] = y;
            if (w->balance == 1)
            {
                x->balance = 0;
                y->balance = -1;
            }
            else if (w->balance == 0)
            {
                x->balance = y->balance = 0;
            }
            else /* |w->pavl_balance == -1| */
            {
                x->balance = +1;
                y->balance = 0;
            }
            w->balance = 0;
            w->parent = y->parent;
            x->parent = y->parent = w;
            if (x->link[0] != NULL)
            {
                x->link[0]->parent = x;
            }
            if (y->link[1] != NULL)
            {
                y->link[1]->parent = y;
            }
        }
    }
    else
    {
        /* Tree still balanced; nothing to rotate. */
        return;
    }
    /* Reattach the rotated subtree to y's old parent (or as the root). */
    if (w->parent != NULL)
    {
        w->parent->link[y != w->parent->link[0]] = w;
    }
    else
    {
        *root = w;
    }

    return;
}
399
/*
 * Remove node item from the AVL tree rooted at *root and rebalance.
 * Based on libavl; parent pointers are maintained as well.  The root
 * pointer itself is used as a pseudo-parent node: PNODE* and PNODE
 * overlap so that q->link[0] aliases *root when q == (PNODE)root.
 */
void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
{
    PNODE p; /* Traverses tree to find node to delete. */
    PNODE q; /* Parent of |p|. */
    int dir; /* Side of |q| on which |p| is linked. */

    if (root == NULL || *root == NULL)
    {
        return ;
    }

    p = item;
    q = p->parent;
    if (q == NULL)
    {
        /* Deleting the root: treat the root pointer as the parent. */
        q = (PNODE) root;
        dir = 0;
    }
    else
    {
        dir = compare(p, q) > 0;
    }

    if (p->link[1] == NULL)
    {
        /* No right child: splice the left subtree into p's place. */
        q->link[dir] = p->link[0];
        if (q->link[dir] != NULL)
        {
            q->link[dir]->parent = p->parent;
        }
    }
    else
    {
        PNODE r = p->link[1];
        if (r->link[0] == NULL)
        {
            /* Right child has no left subtree: it replaces p directly. */
            r->link[0] = p->link[0];
            q->link[dir] = r;
            r->parent = p->parent;
            if (r->link[0] != NULL)
            {
                r->link[0]->parent = r;
            }
            r->balance = p->balance;
            q = r;
            dir = 1;
        }
        else
        {
            /* General case: replace p by its in-order successor s. */
            PNODE s = r->link[0];
            while (s->link[0] != NULL)
            {
                s = s->link[0];
            }
            r = s->parent;
            r->link[0] = s->link[1];
            s->link[0] = p->link[0];
            s->link[1] = p->link[1];
            q->link[dir] = s;
            if (s->link[0] != NULL)
            {
                s->link[0]->parent = s;
            }
            s->link[1]->parent = s;
            s->parent = p->parent;
            if (r->link[0] != NULL)
            {
                r->link[0]->parent = r;
            }
            s->balance = p->balance;
            q = r;
            dir = 0;
        }
    }

    item->link[0] = item->link[1] = item->parent = NULL;
    item->balance = 0;

    /* Walk back up from q, updating balance factors and rotating where a
     * subtree became unbalanced; stop as soon as the height is unchanged. */
    while (q != (PNODE) root)
    {
        PNODE y = q;

        if (y->parent != NULL)
        {
            q = y->parent;
        }
        else
        {
            q = (PNODE) root;
        }

        if (dir == 0)
        {
            /* The left subtree of y shrank. */
            dir = q->link[0] != y;
            y->balance++;
            if (y->balance == +1)
            {
                break;
            }
            else if (y->balance == +2)
            {
                PNODE x = y->link[1];
                if (x->balance == -1)
                {
                    /* Right-left double rotation. */
                    PNODE w;

                    ASSERT(x->balance == -1);
                    w = x->link[0];
                    x->link[0] = w->link[1];
                    w->link[1] = x;
                    y->link[1] = w->link[0];
                    w->link[0] = y;
                    if (w->balance == +1)
                    {
                        x->balance = 0;
                        y->balance = -1;
                    }
                    else if (w->balance == 0)
                    {
                        x->balance = y->balance = 0;
                    }
                    else /* |w->pavl_balance == -1| */
                    {
                        x->balance = +1;
                        y->balance = 0;
                    }
                    w->balance = 0;
                    w->parent = y->parent;
                    x->parent = y->parent = w;
                    if (x->link[0] != NULL)
                    {
                        x->link[0]->parent = x;
                    }
                    if (y->link[1] != NULL)
                    {
                        y->link[1]->parent = y;
                    }
                    q->link[dir] = w;
                }
                else
                {
                    /* Single left rotation. */
                    y->link[1] = x->link[0];
                    x->link[0] = y;
                    x->parent = y->parent;
                    y->parent = x;
                    if (y->link[1] != NULL)
                    {
                        y->link[1]->parent = y;
                    }
                    q->link[dir] = x;
                    if (x->balance == 0)
                    {
                        x->balance = -1;
                        y->balance = +1;
                        break;
                    }
                    else
                    {
                        x->balance = y->balance = 0;
                        y = x;
                    }
                }
            }
        }
        else
        {
            /* The right subtree of y shrank: mirror image of the above. */
            dir = q->link[0] != y;
            y->balance--;
            if (y->balance == -1)
            {
                break;
            }
            else if (y->balance == -2)
            {
                PNODE x = y->link[0];
                if (x->balance == +1)
                {
                    /* Left-right double rotation. */
                    PNODE w;
                    ASSERT(x->balance == +1);
                    w = x->link[1];
                    x->link[1] = w->link[0];
                    w->link[0] = x;
                    y->link[0] = w->link[1];
                    w->link[1] = y;
                    if (w->balance == -1)
                    {
                        x->balance = 0;
                        y->balance = +1;
                    }
                    else if (w->balance == 0)
                    {
                        x->balance = y->balance = 0;
                    }
                    else /* |w->pavl_balance == +1| */
                    {
                        x->balance = -1;
                        y->balance = 0;
                    }
                    w->balance = 0;
                    w->parent = y->parent;
                    x->parent = y->parent = w;
                    if (x->link[1] != NULL)
                    {
                        x->link[1]->parent = x;
                    }
                    if (y->link[0] != NULL)
                    {
                        y->link[0]->parent = y;
                    }
                    q->link[dir] = w;
                }
                else
                {
                    /* Single right rotation. */
                    y->link[0] = x->link[1];
                    x->link[1] = y;
                    x->parent = y->parent;
                    y->parent = x;
                    if (y->link[0] != NULL)
                    {
                        y->link[0]->parent = y;
                    }
                    q->link[dir] = x;
                    if (x->balance == 0)
                    {
                        x->balance = +1;
                        y->balance = -1;
                        break;
                    }
                    else
                    {
                        x->balance = y->balance = 0;
                        y = x;
                    }
                }
            }
        }
    }

}
639
640 PNODE _cdecl avl_get_first(PNODE root)
641 {
642 PNODE p;
643 if (root == NULL)
644 {
645 return NULL;
646 }
647 p = root;
648 while (p->link[0])
649 {
650 p = p->link[0];
651 }
652 return p;
653 }
654
655 PNODE avl_get_next(PNODE root, PNODE p)
656 {
657 PNODE q;
658 if (p->link[1])
659 {
660 p = p->link[1];
661 while(p->link[0])
662 {
663 p = p->link[0];
664 }
665 return p;
666 }
667 else
668 {
669 q = p->parent;
670 while (q && q->link[1] == p)
671 {
672 p = q;
673 q = q->parent;
674 }
675 if (q == NULL)
676 {
677 return NULL;
678 }
679 else
680 {
681 return q;
682 }
683 }
684 }
685
/*
 * Find the smallest free block whose size is >= size.  Returns its node,
 * or NULL when every block in the tree is too small.  On an exact size
 * match the leftmost (lowest-address) block of that size is returned.
 */
PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
{
    PNODE p;
    PNODE prev = NULL;  /* best candidate strictly larger than size */
    int cmp;

    for (p = root; p != NULL;)
    {
        cmp = compare((PVOID)&size, p);
        if (cmp < 0)
        {
            /* p is big enough; remember it and look for a tighter fit. */
            prev = p;
            p = p->link[0];
        }
        else if (cmp > 0)
        {
            p = p->link[1];
        }
        else
        {
            /* Exact size: walk left while the size stays equal so the
             * lowest-addressed block of this size is returned. */
            while (p->link[0])
            {
                cmp = compare((PVOID)&size, p->link[0]);
                if (cmp != 0)
                {
                    break;
                }
                p = p->link[0];
            }
            return p;
        }
    }
    return prev;
}
720
721 /* non paged pool functions ************************************************/
722
723 #ifdef TAG_STATISTICS_TRACKING
724 VOID
725 MiRemoveFromTagHashTable(HDR_USED* block)
726 /*
727 * Remove a block from the tag hash table
728 */
729 {
730 if (block->Tag == 0)
731 {
732 return;
733 }
734 RemoveEntryList(&block->TagListEntry);
735 }
736
737 VOID
738 MiAddToTagHashTable(HDR_USED* block)
739 /*
740 * Add a block to the tag hash table
741 */
742 {
743 ULONG hash;
744
745 if (block->Tag == 0)
746 {
747 return;
748 }
749
750 hash = block->Tag % TAG_HASH_TABLE_SIZE;
751
752 InsertHeadList(&tag_hash_table[hash], &block->TagListEntry);
753 }
754 #endif /* TAG_STATISTICS_TRACKING */
755
756 #if defined(TAG_STATISTICS_TRACKING)
757 VOID STATIC
758 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
759 {
760 CHAR c1, c2, c3, c4;
761
762 c1 = (CHAR)((CurrentTag >> 24) & 0xFF);
763 c2 = (CHAR)((CurrentTag >> 16) & 0xFF);
764 c3 = (CHAR)((CurrentTag >> 8) & 0xFF);
765 c4 = (CHAR)(CurrentTag & 0xFF);
766
767 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
768 {
769 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
770 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
771 CurrentSize, CurrentSize / CurrentNrBlocks);
772 }
773 else
774 {
775 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
776 CurrentTag, CurrentNrBlocks, CurrentSize,
777 CurrentSize / CurrentNrBlocks);
778 }
779 }
780 #endif /* defined(TAG_STATISTICS_TRACKING) */
781
/*
 * Print per-tag usage statistics for the nonpaged pool to the debugger.
 * NewOnly: when TRUE, only blocks never dumped before are counted.
 * Compiled to a no-op unless TAG_STATISTICS_TRACKING is defined.
 * NOTE(review): MmNpoolLock is not acquired here - confirm callers
 * serialize against concurrent allocation/free.
 */
VOID
NTAPI
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#if defined(TAG_STATISTICS_TRACKING)
    ULONG i;
    HDR_USED* current;
    ULONG CurrentTag;
    ULONG CurrentNrBlocks = 0;
    ULONG CurrentSize = 0;
    ULONG TotalBlocks;
    ULONG TotalSize;
    ULONG Size;
    LIST_ENTRY tmpListHead;
    PLIST_ENTRY current_entry;

    DbgPrint("******* Dumping non paging pool stats ******\n");
    TotalBlocks = 0;
    TotalSize = 0;
    for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
    {
        InitializeListHead(&tmpListHead);

        /* Repeatedly pick the first remaining tag in this bucket, move all
         * blocks with that tag onto tmpListHead and print their totals. */
        while (!IsListEmpty(&tag_hash_table[i]))
        {
            CurrentTag = 0;

            current_entry = tag_hash_table[i].Flink;
            while (current_entry != &tag_hash_table[i])
            {
                current = CONTAINING_RECORD(current_entry, HDR_USED, TagListEntry);
                current_entry = current_entry->Flink;
                if (CurrentTag == 0)
                {
                    /* First block of this pass selects the tag. */
                    CurrentTag = current->Tag;
                    CurrentNrBlocks = 0;
                    CurrentSize = 0;
                }
                if (current->Tag == CurrentTag)
                {
                    RemoveEntryList(&current->TagListEntry);
                    InsertHeadList(&tmpListHead, &current->TagListEntry);
                    if (!NewOnly || !current->Dumped)
                    {
                        CurrentNrBlocks++;
                        TotalBlocks++;
                        CurrentSize += current->hdr.Size;
                        TotalSize += current->hdr.Size;
                        current->Dumped = TRUE;
                    }
                }
            }
            if (CurrentTag != 0 && CurrentNrBlocks != 0)
            {
                MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
            }
        }
        /* Splice the processed blocks back into the hash bucket. */
        if (!IsListEmpty(&tmpListHead))
        {
            tag_hash_table[i].Flink = tmpListHead.Flink;
            tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
            tag_hash_table[i].Blink = tmpListHead.Blink;
            tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
        }
    }
    if (TotalBlocks != 0)
    {
        DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
                 TotalBlocks, TotalSize, TotalSize / TotalBlocks);
    }
    else
    {
        DbgPrint("TotalBlocks %d TotalSize %d\n",
                 TotalBlocks, TotalSize);
    }
    /* Free pool bytes that are actually backed by committed pages. */
    Size = EiFreeNonPagedPool - (MiNonPagedPoolLength - MiNonPagedPoolNrOfPages * PAGE_SIZE);
    DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
             EiNrFreeBlocks, Size, EiNrFreeBlocks ? Size / EiNrFreeBlocks : 0);
    DbgPrint("***************** Dump Complete ***************\n");
#endif /* defined(TAG_STATISTICS_TRACKING) */
}
863
/*
 * Dump every block on the used list with its size, tag and allocator
 * address.  NewOnly: when TRUE, only blocks not previously dumped are
 * shown.  Compiled to a no-op unless POOL_DEBUG_APIS is defined.
 */
VOID
NTAPI
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
#if defined(POOL_DEBUG_APIS)
    HDR_USED* current;
    PLIST_ENTRY current_entry;
    KIRQL oldIrql;

    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

    DbgPrint("******* Dumping non paging pool contents ******\n");
    current_entry = UsedBlockListHead.Flink;
    while (current_entry != &UsedBlockListHead)
    {
        current = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
        if (!NewOnly || !current->Dumped)
        {
            CHAR c1, c2, c3, c4;

            /* Decode the tag into its four characters (stored reversed). */
            c1 = (CHAR)((current->Tag >> 24) & 0xFF);
            c2 = (CHAR)((current->Tag >> 16) & 0xFF);
            c3 = (CHAR)((current->Tag >> 8) & 0xFF);
            c4 = (CHAR)(current->Tag & 0xFF);

            if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
            {
                DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                         current->hdr.Size, current->Tag, c4, c3, c2, c1,
                         current->Caller);
            }
            else
            {
                DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                         current->hdr.Size, current->Tag, current->Caller);
            }
            current->Dumped = TRUE;
        }
        current_entry = current_entry->Flink;
    }
    DbgPrint("***************** Dump Complete ***************\n");
    KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#endif
}
908
909 #ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 * Bugchecks when a free block has a bad magic, lies (partly) outside the
 * pool area, or when more blocks are found than EiNrFreeBlocks records.
 */
{
    HDR_FREE* current;
    unsigned int blocks_seen=0;
    PNODE p;

    p = avl_get_first(FreeBlockListRoot);

    while(p)
    {
        PVOID base_addr;

        current = CONTAINING_RECORD(p, HDR_FREE, Node);
        base_addr = (PVOID)current;

        if (current->hdr.Magic != BLOCK_HDR_FREE_MAGIC)
        {
            DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                     current);
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }

        /* The whole block must lie inside the pool area. */
        if (base_addr < MiNonPagedPoolStart ||
                (ULONG_PTR)base_addr + current->hdr.Size > (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
        {
            DbgPrint("Block %x found outside pool area\n",current);
            DbgPrint("Size %d\n",current->hdr.Size);
            DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                     (ULONG_PTR)MiNonPagedPoolStart+MiNonPagedPoolLength);
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }
        blocks_seen++;
        if (blocks_seen > EiNrFreeBlocks)
        {
            /* More nodes than the counter says: a cycle or corruption. */
            DbgPrint("Too many blocks on free list\n");
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }
        p = avl_get_next(FreeBlockListRoot, p);
    }
}
953
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * Bugchecks on bad magic, blocks outside the pool area, more blocks than
 * EiNrUsedBlocks records, or inconsistent Flink/Blink linkage.
 */
{
    HDR_USED* current;
    PLIST_ENTRY current_entry;
    unsigned int blocks_seen=0;

    current_entry = UsedBlockListHead.Flink;
    while (current_entry != &UsedBlockListHead)
    {
        PVOID base_addr;

        current = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
        base_addr = (PVOID)current;

        if (current->hdr.Magic != BLOCK_HDR_USED_MAGIC)
        {
            DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                     current);
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }
        /* The whole block must lie inside the pool area. */
        if (base_addr < MiNonPagedPoolStart ||
                ((ULONG_PTR)base_addr+current->hdr.Size) >
                (ULONG_PTR)MiNonPagedPoolStart+MiNonPagedPoolLength)
        {
            DbgPrint("Block %x found outside pool area\n",current);
            DbgPrint("Size %d\n",current->hdr.Size);
            DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                     (ULONG_PTR)MiNonPagedPoolStart+MiNonPagedPoolLength);
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }
        blocks_seen++;
        if (blocks_seen > EiNrUsedBlocks)
        {
            /* More entries than the counter says: a cycle or corruption. */
            DbgPrint("Too many blocks on used list\n");
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }
        /* The forward and backward pointers must agree. */
        if (current->ListEntry.Flink != &UsedBlockListHead &&
                current->ListEntry.Flink->Blink != &current->ListEntry)
        {
            DbgPrint("%s:%d:Break in list (current %x next %x "
                     "current->next->previous %x)\n",
                     __FILE__,__LINE__,current, current->ListEntry.Flink,
                     current->ListEntry.Flink->Blink);
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }

        current_entry = current_entry->Flink;
    }
}
1006
static void check_duplicates(HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if duplicates are found
 * Scans both the free tree and the used list for any other block whose
 * extent intersects [blk, blk + blk->Size).
 */
{
    ULONG_PTR base = (ULONG_PTR)blk;
    ULONG_PTR last = (ULONG_PTR)blk + blk->Size;
    PLIST_ENTRY current_entry;
    PNODE p;
    HDR_FREE* free;
    HDR_USED* used;

    p = avl_get_first(FreeBlockListRoot);

    while (p)
    {
        free = CONTAINING_RECORD(p, HDR_FREE, Node);
        if (free->hdr.Magic != BLOCK_HDR_FREE_MAGIC)
        {
            DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                     free);
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }

        /* Does another block start inside ours? */
        if ( (ULONG_PTR)free > base && (ULONG_PTR)free < last )
        {
            DbgPrint("intersecting blocks on list\n");
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }
        /* Does an earlier block run into our start? */
        if ( (ULONG_PTR)free < base &&
                ((ULONG_PTR)free + free->hdr.Size) > base )
        {
            DbgPrint("intersecting blocks on list\n");
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }
        p = avl_get_next(FreeBlockListRoot, p);
    }

    current_entry = UsedBlockListHead.Flink;
    while (current_entry != &UsedBlockListHead)
    {
        used = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);

        /* Same two overlap checks against every used block. */
        if ( (ULONG_PTR)used > base && (ULONG_PTR)used < last )
        {
            DbgPrint("intersecting blocks on list\n");
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }
        if ( (ULONG_PTR)used < base &&
                ((ULONG_PTR)used + used->hdr.Size) > base )
        {
            DbgPrint("intersecting blocks on list\n");
            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }

        current_entry = current_entry->Flink;
    }

}
1069
1070 static void validate_kernel_pool(void)
1071 /*
1072 * FUNCTION: Checks the integrity of the kernel memory heap
1073 */
1074 {
1075 HDR_FREE* free;
1076 HDR_USED* used;
1077 PLIST_ENTRY current_entry;
1078 PNODE p;
1079
1080 validate_free_list();
1081 validate_used_list();
1082
1083 p = avl_get_first(FreeBlockListRoot);
1084 while (p)
1085 {
1086 free = CONTAINING_RECORD(p, HDR_FREE, Node);
1087 check_duplicates(&free->hdr);
1088 p = avl_get_next(FreeBlockListRoot, p);
1089 }
1090 current_entry = UsedBlockListHead.Flink;
1091 while (current_entry != &UsedBlockListHead)
1092 {
1093 used = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
1094 check_duplicates(&used->hdr);
1095 current_entry = current_entry->Flink;
1096 }
1097 }
1098 #endif
1099
#if 0
/* Dead code (compiled out): was intended to release whole pages contained
 * in a freed block back to the system; only the early-out check exists. */
STATIC VOID
free_pages(HDR_FREE* blk)
{
    ULONG start;
    ULONG end;

    start = (ULONG_PTR)blk;
    end = (ULONG_PTR)blk + blk->hdr.Size;

    /*
     * If the block doesn't contain a whole page then there is nothing to do
     */
    if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
    {
        return;
    }
}
#endif
1119
1120 static void remove_from_used_list(HDR_USED* current)
1121 {
1122 RemoveEntryList(&current->ListEntry);
1123 EiUsedNonPagedPool -= current->hdr.Size;
1124 EiNrUsedBlocks--;
1125 }
1126
1127 static void remove_from_free_list(HDR_FREE* current)
1128 {
1129 DPRINT("remove_from_free_list %d\n", current->hdr.Size);
1130
1131 avl_remove(&FreeBlockListRoot, &current->Node, compare_node);
1132
1133 EiFreeNonPagedPool -= current->hdr.Size;
1134 EiNrFreeBlocks--;
1135 DPRINT("remove_from_free_list done\n");
1136 #ifdef DUMP_AVL
1137
1138 DumpFreeBlockTree();
1139 #endif
1140 }
1141
static void
add_to_free_list(HDR_FREE* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 * Coalesces the block with its physically adjacent neighbours when they
 * are free, fixes the previous-pointer of the following block and inserts
 * the result into the free AVL tree.
 */
{
    HDR_FREE* current;
    BOOL UpdatePrevPtr = FALSE;

    DPRINT("add_to_free_list (%d)\n", blk->hdr.Size);

    EiNrFreeBlocks++;

    /* Merge with the physically preceding block if it is free. */
    current = (HDR_FREE*)blk->hdr.previous;
    if (current && current->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
    {
        remove_from_free_list(current);
        current->hdr.Size = current->hdr.Size + blk->hdr.Size;
        /* Temporarily mark the merged block used so it is not mistaken
         * for a free block while its header is rebuilt below. */
        current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
        memset(blk, 0xcc, HDR_USED_SIZE);  /* poison the swallowed header */
        blk = current;
        UpdatePrevPtr = TRUE;
    }

    /* Merge with the physically following block if it is free. */
    current = (HDR_FREE*)((ULONG_PTR)blk + blk->hdr.Size);
    if ((char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength &&
            current->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
    {
        remove_from_free_list(current);
        blk->hdr.Size += current->hdr.Size;
        memset(current, 0xcc, HDR_FREE_SIZE);  /* poison the swallowed header */
        UpdatePrevPtr = TRUE;
        current = (HDR_FREE*)((ULONG_PTR)blk + blk->hdr.Size);
    }
    /* After a merge the next physical block must point back at us. */
    if (UpdatePrevPtr &&
            (char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
    {
        current->hdr.previous = &blk->hdr;
    }
    DPRINT("%d\n", blk->hdr.Size);
    blk->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
    EiFreeNonPagedPool += blk->hdr.Size;
    avl_insert(&FreeBlockListRoot, &blk->Node, compare_node);
    DPRINT("add_to_free_list done\n");
#ifdef DUMP_AVL

    DumpFreeBlockTree();
#endif
}
1191
1192 static void add_to_used_list(HDR_USED* blk)
1193 /*
1194 * FUNCTION: add the block to the used list (internal)
1195 */
1196 {
1197 InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
1198 EiUsedNonPagedPool += blk->hdr.Size;
1199 EiNrUsedBlocks++;
1200 }
1201
1202
/*
 * Commit the physical pages backing the range from blk's header up to end
 * that are not yet present according to MiNonPagedPoolAllocMap.
 * Returns TRUE on success; on failure all pages allocated for the current
 * run are released and FALSE is returned (already-mapped runs stay mapped).
 */
static BOOLEAN
grow_block(HDR_FREE* blk, PVOID end)
{
    NTSTATUS Status;
    PFN_TYPE Page[32];  /* at most one bitmap word worth of pages per run */
    ULONG_PTR StartIndex, EndIndex;
    ULONG i, j, k;

    /* First page that must be present after the (already committed) header,
     * and first page index past the requested end. */
    StartIndex = (ULONG_PTR)(PAGE_ROUND_UP((ULONG_PTR)blk + HDR_FREE_SIZE - (ULONG_PTR)MiNonPagedPoolStart)) / PAGE_SIZE;
    EndIndex = ((ULONG_PTR)PAGE_ROUND_UP(end) - (ULONG_PTR)MiNonPagedPoolStart) / PAGE_SIZE;


    for (i = StartIndex; i < EndIndex; i++)
    {
        if (!(MiNonPagedPoolAllocMap[i / 32] & (1 << (i % 32))))
        {
            /* Found an uncommitted page: extend the run up to the next
             * committed page, but never beyond 32 pages (Page[] capacity). */
            for (j = i + 1; j < EndIndex && j - i < 32; j++)
            {
                if (MiNonPagedPoolAllocMap[j / 32] & (1 << (j % 32)))
                {
                    break;
                }
            }
            /* Allocate the physical pages for the run. */
            for (k = 0; k < j - i; k++)
            {
                Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page[k]);
                if (!NT_SUCCESS(Status))
                {
                    /* NOTE: i is reused as the release index; safe only
                     * because we return immediately afterwards. */
                    for (i = 0; i < k; i++)
                    {
                        MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
                    }
                    return FALSE;
                }
            }
            Status = MmCreateVirtualMapping(NULL,
                                            (PVOID)((ULONG_PTR)MiNonPagedPoolStart + i * PAGE_SIZE),
                                            PAGE_READWRITE|PAGE_SYSTEM,
                                            Page,
                                            k);
            if (!NT_SUCCESS(Status))
            {
                /* NOTE: same index reuse as above; we return right away. */
                for (i = 0; i < k; i++)
                {
                    MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
                }
                return FALSE;
            }
            /* Mark the freshly mapped pages as committed in the bitmap. */
            for (j = i; j < k + i; j++)
            {
                MiNonPagedPoolAllocMap[j / 32] |= (1 << (j % 32));
            }
            MiNonPagedPoolNrOfPages += k;
            i += k - 1;
        }
    }
    return TRUE;
}
1261
/*
 * Carve an allocated block of the given payload size out of the free tree.
 * size: payload size in bytes (header not included).
 * alignment: required alignment of the returned payload, 0 for none.
 * Returns the HDR_USED of the new block, or NULL when no free block fits
 * or the backing pages could not be committed.  Payloads smaller than a
 * page are additionally placed so that they do not cross a page boundary.
 */
static HDR_USED* get_block(unsigned int size, unsigned long alignment)
{
    HDR_FREE *blk, *current, *previous = NULL, *next = NULL, *best = NULL;
    ULONG previous_size = 0, current_size, next_size = 0, new_size;
    PVOID end;
    PVOID addr, aligned_addr, best_aligned_addr=NULL;
    PNODE p;

    DPRINT("get_block %d\n", size);

    /* Start at the smallest free block that could possibly fit. */
    p = avl_find_equal_or_greater(FreeBlockListRoot, size + HDR_USED_SIZE, compare_value);
    while (p)
    {
        current = CONTAINING_RECORD(p, HDR_FREE, Node);
        addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
        /* calculate first aligned address available within this block */
        aligned_addr = alignment > 0 ? MM_ROUND_UP(addr, alignment) : addr;
        if (size < PAGE_SIZE)
        {
            /* check that the block is in one page */
            if (PAGE_ROUND_DOWN(aligned_addr) != PAGE_ROUND_DOWN((ULONG_PTR)aligned_addr + size - 1))
            {
                aligned_addr = (PVOID)PAGE_ROUND_UP(aligned_addr);
            }
        }
        DPRINT("%x %x\n", addr, aligned_addr);
        if (aligned_addr != addr)
        {
            /* The gap before the aligned payload must be big enough to hold
             * a free-block header of its own; push aligned_addr past it.
             * NOTE(review): the recomputed value is deterministic, so this
             * loop runs at most once - confirm. */
            while((ULONG_PTR)aligned_addr - (ULONG_PTR)addr < HDR_FREE_SIZE)
            {
                if (alignment == 0)
                {
                    aligned_addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE + HDR_FREE_SIZE);
                }
                else
                {
                    aligned_addr = MM_ROUND_UP((PVOID)((ULONG_PTR)current + HDR_USED_SIZE + HDR_FREE_SIZE), alignment);
                }
                if (size < PAGE_SIZE)
                {
                    /* check that the block is in one page */
                    if (PAGE_ROUND_DOWN(aligned_addr) != PAGE_ROUND_DOWN((ULONG_PTR)aligned_addr + size - 1))
                    {
                        aligned_addr = (PVOID)PAGE_ROUND_UP(aligned_addr);
                    }
                }
            }
        }
        DPRINT("%x %x\n", addr, aligned_addr);
        new_size = (ULONG_PTR)aligned_addr - (ULONG_PTR)addr + size;
        /* Best-fit: keep the smallest block that can hold the aligned
         * request; stop at once if the fit is nearly exact. */
        if (current->hdr.Size >= new_size + HDR_USED_SIZE &&
                (best == NULL || current->hdr.Size < best->hdr.Size))
        {
            best = current;
            best_aligned_addr = aligned_addr;
            if (new_size <= size + 2 * HDR_FREE_SIZE)
            {
                break;
            }
        }

        /* Heuristic early exit once a candidate exists and the current
         * block is comfortably larger than required. */
        if (best)
        {
            if (size < PAGE_SIZE)
            {
                if (current->hdr.Size >= 2 * PAGE_SIZE + HDR_FREE_SIZE)
                {
                    break;
                }
            }
            else
            {
                if (current->hdr.Size >= size + alignment + HDR_FREE_SIZE)
                {
                    break;
                }
            }
        }
        p = avl_get_next(FreeBlockListRoot, p);
    }
    /*
     * We didn't find anything suitable at all.
     */
    if (best == NULL)
    {
        return NULL;
    }

    DPRINT(":: blk %x blk->hdr.Size %d (%d) Size %d\n", best, best->hdr.Size, best->hdr.Size - HDR_USED_SIZE, size);

    current = best;
    current_size = current->hdr.Size - HDR_USED_SIZE;
    addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
    if (addr != best_aligned_addr)
    {
        blk = (HDR_FREE*)((ULONG_PTR)best_aligned_addr - HDR_USED_SIZE);
        /*
         * if size-aligned, break off the preceding bytes into their own block...
         */
        previous = current;
        previous_size = (ULONG_PTR)blk - (ULONG_PTR)previous - HDR_FREE_SIZE;
        current = blk;
        current_size -= ((ULONG_PTR)current - (ULONG_PTR)previous);
    }
    end = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE + size);

    if (current_size >= size + HDR_FREE_SIZE + MM_POOL_ALIGNMENT)
    {
        /* create a new free block after our block, if the memory size is >= 4 byte for this block */
        next = (HDR_FREE*)((ULONG_PTR)current + size + HDR_USED_SIZE);
        next_size = current_size - size - HDR_FREE_SIZE;
        current_size = size;
        end = (PVOID)((ULONG_PTR)next + HDR_FREE_SIZE);
    }

    if (previous)
    {
        /* The leading fragment stays free: commit the backing pages, then
         * rebuild the headers of both fragments. */
        remove_from_free_list(previous);
        if (!grow_block(previous, end))
        {
            add_to_free_list(previous);
            return NULL;
        }
        memset(current, 0, HDR_USED_SIZE);
        current->hdr.Size = current_size + HDR_USED_SIZE;
        current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
        current->hdr.previous = &previous->hdr;
        previous->hdr.Size = previous_size + HDR_FREE_SIZE;
        if (next == NULL)
        {
            /* Fix the back-pointer of the physically following block. */
            blk = (HDR_FREE*)((ULONG_PTR)current + current->hdr.Size);
            if ((ULONG_PTR)blk < (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
            {
                blk->hdr.previous = &current->hdr;
            }
        }

        add_to_free_list(previous);
    }
    else
    {
        remove_from_free_list(current);

        if (!grow_block(current, end))
        {
            add_to_free_list(current);
            return NULL;
        }
        current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
        if (next)
        {
            current->hdr.Size = current_size + HDR_USED_SIZE;
        }
    }

    if (next)
    {
        /* Initialize the trailing free fragment and link it in. */
        memset(next, 0, HDR_FREE_SIZE);
        next->hdr.Size = next_size + HDR_FREE_SIZE;
        next->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
        next->hdr.previous = &current->hdr;
        blk = (HDR_FREE*)((ULONG_PTR)next + next->hdr.Size);
        if ((ULONG_PTR)blk < (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
        {
            blk->hdr.previous = &next->hdr;
        }
        add_to_free_list(next);
    }

    add_to_used_list((HDR_USED*)current);
    VALIDATE_POOL;

    /* Paranoia checks: sub-page payloads must not cross a page boundary
     * and the payload must honour the requested alignment. */
    if (size < PAGE_SIZE)
    {
        addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
        if (PAGE_ROUND_DOWN(addr) != PAGE_ROUND_DOWN((PVOID)((ULONG_PTR)addr + size - 1)))
        {
            DPRINT1("%x %x\n", addr, (ULONG_PTR)addr + size);
        }
        ASSERT (PAGE_ROUND_DOWN(addr) == PAGE_ROUND_DOWN((PVOID)((ULONG_PTR)addr + size - 1)));
    }
    if (alignment)
    {
        addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
        ASSERT(MM_ROUND_UP(addr, alignment) == addr);
    }
    return (HDR_USED*)current;
}
1450
1451 ULONG STDCALL
1452 ExRosQueryNonPagedPoolTag ( PVOID Addr )
1453 {
1454 HDR_USED* blk=(HDR_USED*)((ULONG_PTR)Addr - HDR_USED_SIZE);
1455 if (blk->hdr.Magic != BLOCK_HDR_USED_MAGIC)
1456 KEBUGCHECK(0);
1457
1458 return blk->Tag;
1459 }
1460
1461 VOID STDCALL ExFreeNonPagedPool (PVOID block)
1462 /*
1463 * FUNCTION: Releases previously allocated memory
1464 * ARGUMENTS:
1465 * block = block to free
1466 */
1467 {
1468 HDR_USED* blk=(HDR_USED*)((ULONG_PTR)block - HDR_USED_SIZE);
1469 KIRQL oldIrql;
1470
1471 if (block == NULL)
1472 {
1473 return;
1474 }
1475
1476 DPRINT("freeing block %x\n",blk);
1477
1478 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->hdr.Size,
1479 blk->Caller);
1480 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1481
1482 VALIDATE_POOL;
1483 if (blk->hdr.Magic != BLOCK_HDR_USED_MAGIC)
1484 {
1485 if (blk->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
1486 {
1487 DbgPrint("ExFreePool of already freed address %x\n", block);
1488 }
1489 else
1490 {
1491 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1492 block, blk->hdr.Magic);
1493 }
1494 KEBUGCHECK(0);
1495 return;
1496 }
1497 memset(block, 0xcc, blk->hdr.Size - HDR_USED_SIZE);
1498 #ifdef TAG_STATISTICS_TRACKING
1499
1500 MiRemoveFromTagHashTable(blk);
1501 #endif
1502
1503 remove_from_used_list(blk);
1504 blk->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
1505 add_to_free_list((HDR_FREE*)blk);
1506 VALIDATE_POOL;
1507 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1508 }
1509
1510 PVOID STDCALL
1511 ExAllocateNonPagedPoolWithTag(POOL_TYPE Type, ULONG Size, ULONG Tag, PVOID Caller)
1512 {
1513 PVOID block;
1514 HDR_USED* best = NULL;
1515 KIRQL oldIrql;
1516 ULONG alignment;
1517
1518 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1519 Size,Caller);
1520
1521 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1522
1523 VALIDATE_POOL;
1524
1525 #if 0
1526 /* after some allocations print the npaged pool stats */
1527 #ifdef TAG_STATISTICS_TRACKING
1528
1529 {
1530 static ULONG counter = 0;
1531 if (counter++ % 100000 == 0)
1532 {
1533 MiDebugDumpNonPagedPoolStats(FALSE);
1534 }
1535 }
1536 #endif
1537 #endif
1538 /*
1539 * accomodate this useful idiom
1540 */
1541 if (Size == 0)
1542 {
1543 POOL_TRACE("= NULL\n");
1544 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1545 return(NULL);
1546 }
1547 /* Make the size dword alligned, this makes the block dword alligned */
1548 Size = ROUND_UP(Size, MM_POOL_ALIGNMENT);
1549
1550 if (Size >= PAGE_SIZE)
1551 {
1552 alignment = PAGE_SIZE;
1553 }
1554 else if (Type & CACHE_ALIGNED_POOL_MASK)
1555 {
1556 alignment = MM_CACHE_LINE_SIZE;
1557 }
1558 else
1559 {
1560 alignment = 0;
1561 }
1562
1563 best = get_block(Size, alignment);
1564 if (best == NULL)
1565 {
1566 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1567 DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
1568 Size );
1569 return NULL;
1570 }
1571 best->Tag = Tag;
1572 best->Caller = Caller;
1573 best->Dumped = FALSE;
1574 best->TagListEntry.Flink = best->TagListEntry.Blink = NULL;
1575 #ifdef TAG_STATISTICS_TRACKING
1576
1577 MiAddToTagHashTable(best);
1578 #endif
1579
1580 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1581 block = (PVOID)((ULONG_PTR)best + HDR_USED_SIZE);
1582 /* RtlZeroMemory(block, Size);*/
1583 return(block);
1584 }
1585
VOID
INIT_FUNCTION
NTAPI
MiInitializeNonPagedPool(VOID)
/*
 * FUNCTION: One-time bootstrap of the nonpaged pool. Maps the pages that
 *           back the pool bookkeeping, marks them in the allocation bitmap,
 *           and lays out the three initial blocks: a small free block, the
 *           used block holding the allocation bitmap, and one large free
 *           block covering the rest of the pool address range.
 */
{
   NTSTATUS Status;
   PFN_TYPE Page;
   ULONG i;
   PVOID Address;
   HDR_USED* used;
   HDR_FREE* free;
#ifdef TAG_STATISTICS_TRACKING

   /* Empty hash buckets for per-tag allocation statistics. */
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tag_hash_table[i]);
   }
#endif
   KeInitializeSpinLock(&MmNpoolLock);
   InitializeListHead(&UsedBlockListHead);
   InitializeListHead(&AddressListHead);
   FreeBlockListRoot = NULL;

   /* The allocation bitmap is placed one page into the pool; the first
    * page holds the initial free block and the bitmap block's header. */
   MiNonPagedPoolAllocMap = (PVOID)((ULONG_PTR)MiNonPagedPoolStart + PAGE_SIZE);
   /* Pages to map up front: one leading page plus enough pages for one
    * bitmap bit per pool page (rounded up to 32-bit words).
    * NOTE(review): the sizing adds HDR_FREE_SIZE although the bitmap block
    * below is a used block (HDR_USED_SIZE) - presumably a conservative
    * over-estimate; verify against the block layout at the end. */
   MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + HDR_FREE_SIZE) + PAGE_SIZE;
   MiNonPagedPoolNrOfPages /= PAGE_SIZE;
   Address = MiNonPagedPoolStart;

   /* Back the bootstrap region with physical pages; failure this early
    * is unrecoverable, hence the bugchecks. */
   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to allocate a page\n");
         KEBUGCHECK(0);
      }

      Status = MmCreateVirtualMapping(NULL,
                                      Address,
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      &Page,
                                      1);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
      Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
   }

   /* Mark the pages just mapped as in-use in the allocation bitmap
    * (one bit per pool page, packed into 32-bit words). */
   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      MiNonPagedPoolAllocMap[i / 32] |= (1 << (i % 32));
   }

   /* the first block is free */
   free = (HDR_FREE*)MiNonPagedPoolStart;
   free->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   /* Sized so that the NEXT block's payload lands exactly on the bitmap
    * address (Start + PAGE_SIZE): next block starts at
    * Start + PAGE_SIZE - HDR_USED_SIZE and carries a HDR_USED header. */
   free->hdr.Size = PAGE_SIZE - HDR_USED_SIZE;
   free->hdr.previous = NULL;
   /* 0xcc-fill the free payload so stray reads are recognizable. */
   memset((PVOID)((ULONG_PTR)free + HDR_FREE_SIZE), 0xcc, free->hdr.Size - HDR_FREE_SIZE);
   add_to_free_list(free);

   /* the second block contains the non paged pool bitmap */
   used = (HDR_USED*)((ULONG_PTR)free + free->hdr.Size);
   used->hdr.Magic = BLOCK_HDR_USED_MAGIC;
   used->hdr.Size = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + HDR_USED_SIZE;
   used->hdr.previous = &free->hdr;
   /* Reserved tag marking an internal, never-freed allocation. */
   used->Tag = 0xffffffff;
   used->Caller = 0;
   used->Dumped = FALSE;
   add_to_used_list(used);
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(used);
#endif

   /* the third block is the free block after the bitmap */
   free = (HDR_FREE*)((ULONG_PTR)used + used->hdr.Size);
   free->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   /* Spans the remainder of the pool's virtual range; only the part up to
    * 'Address' is actually mapped, so only that part is poisoned. */
   free->hdr.Size = MiNonPagedPoolLength - ((ULONG_PTR)free - (ULONG_PTR)MiNonPagedPoolStart);
   free->hdr.previous = &used->hdr;
   memset((PVOID)((ULONG_PTR)free + HDR_FREE_SIZE), 0x0cc, (ULONG_PTR)Address - (ULONG_PTR)free - HDR_FREE_SIZE);
   add_to_free_list(free);
}
1670
1671 PVOID
1672 STDCALL
1673 MiAllocateSpecialPool (IN POOL_TYPE PoolType,
1674 IN SIZE_T NumberOfBytes,
1675 IN ULONG Tag,
1676 IN ULONG Underrun
1677 )
1678 {
1679 /* FIXME: Special Pools not Supported */
1680 DbgPrint("Special Pools not supported\n");
1681 return NULL;
1682 }
1683
1684 /* EOF */