- Make the NDK compatible with the MSDDK again.
[reactos.git] / reactos / ntoskrnl / mm / npool.c
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 *
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 * Iwan Fatahi (i_fatahi@hotmail.com)
10 * Robert Bergkvist (fragdance@hotmail.com)
11 * Hartmut Birr
12 */
13
14 /* INCLUDES ****************************************************************/
15
16 #include <ntoskrnl.h>
17 #define NDEBUG
18 #include <internal/debug.h>
19
20 #ifdef ENABLE_VALIDATE_POOL
21 #define VALIDATE_POOL validate_kernel_pool()
22 #else
23 #define VALIDATE_POOL
24 #endif
25
26 #if 0
27 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
28 #else
29 #if defined(__GNUC__)
30 #define POOL_TRACE(args...)
31 #else
32 #define POOL_TRACE
33 #endif /* __GNUC__ */
34 #endif
35
36 /* avl types ****************************************************************/
37
38 /* FIXME:
39 * This declarations should be moved into a separate header file.
40 */
41
/*
 * AVL tree node, embedded in every free-block header (HDR_FREE).
 * The free-block index is a balanced binary tree keyed by block size
 * (see compare_node), so best-fit lookups are O(log n).
 */
typedef struct _NODE
{
   struct _NODE* link[2];   /* link[0] = left child, link[1] = right child */
   struct _NODE* parent;    /* parent node; NULL for the tree root */
   signed char balance;     /* AVL balance factor: height(right) - height(left) */
}
NODE, *PNODE;
49
50 /* TYPES *******************************************************************/
51
/* Magic values written into HDR.Magic so pool corruption and
 * double-frees can be detected at run time. */
#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)   /* block is allocated */
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)   /* block is on the free tree */

/*
 * fields present at the start of a block (this is for internal use only)
 */
typedef struct _HDR
{
   ULONG Magic;           /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
   ULONG Size;            /* total size of the block, header included */
   struct _HDR* previous; /* physically preceding block in the pool, or NULL */
} HDR, *PHDR;

/* Header of an allocated block; lives immediately before the bytes
 * returned to the caller. */
typedef struct _HDR_USED
{
   HDR hdr;
   LIST_ENTRY ListEntry;    /* link in UsedBlockListHead */
   ULONG Tag;               /* pool tag supplied by the allocator's caller */
   PVOID Caller;            /* return address of the allocating function */
   LIST_ENTRY TagListEntry; /* link in the per-tag statistics hash bucket */
   BOOLEAN Dumped;          /* TRUE once shown by a "NewOnly" debug dump */
} HDR_USED, *PHDR_USED;

/* Header of a free block; the AVL node replaces the bookkeeping
 * fields that only allocated blocks need. */
typedef struct _HDR_FREE
{
   HDR hdr;
   NODE Node;               /* node in the size-ordered free-block tree */
} HDR_FREE, *PHDR_FREE;

/* Header sizes rounded up to the pool alignment, so user data that
 * follows a header is itself correctly aligned. */
#define HDR_FREE_SIZE ROUND_UP(sizeof(HDR_FREE), MM_POOL_ALIGNMENT)
#define HDR_USED_SIZE ROUND_UP(sizeof(HDR_USED), MM_POOL_ALIGNMENT)
83
84
85 /* GLOBALS *****************************************************************/
86
/* Start address and byte length of the nonpaged pool region,
 * defined by the pool-region setup code elsewhere in mm. */
extern PVOID MiNonPagedPoolStart;
extern ULONG MiNonPagedPoolLength;

/*
 * Head of the list of free blocks
 */
static PNODE FreeBlockListRoot = NULL;

/*
 * Head of the list of in use block
 */
static LIST_ENTRY UsedBlockListHead;

/* NOTE(review): not referenced anywhere in this file's visible code —
 * presumably initialized/used by code outside this chunk; verify. */
static LIST_ENTRY AddressListHead;

/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;

/*
 * Lock that protects the non-paged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/* Total quota for Non Paged Pool */
ULONG MmTotalNonPagedPoolQuota = 0;

/*
 * Allocate a range of memory in the nonpaged pool
 */
PVOID
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

VOID
MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
/* Buckets of HDR_USED.TagListEntry entries, hashed by pool tag,
 * used only for the per-tag statistics dumps. */
static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */

/* One bit per pool page: set when the page is committed (mapped).
 * MiNonPagedPoolNrOfPages counts the committed pages. */
static PULONG MiNonPagedPoolAllocMap;
static ULONG MiNonPagedPoolNrOfPages;
146
147 /* avl helper functions ****************************************************/
148
149 void DumpFreeBlockNode(PNODE p)
150 {
151 static int count = 0;
152 HDR_FREE* blk;
153
154 count++;
155
156 if (p)
157 {
158 DumpFreeBlockNode(p->link[0]);
159 blk = CONTAINING_RECORD(p, HDR_FREE, Node);
160 DbgPrint("%08x %8d (%d)\n", blk, blk->hdr.Size, count);
161 DumpFreeBlockNode(p->link[1]);
162 }
163
164 count--;
165 }
166 void DumpFreeBlockTree(void)
167 {
168 DbgPrint("--- Begin tree ------------------\n");
169 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, HDR_FREE, Node));
170 DumpFreeBlockNode(FreeBlockListRoot);
171 DbgPrint("--- End tree --------------------\n");
172 }
173
174 int compare_node(PNODE p1, PNODE p2)
175 {
176 HDR_FREE* blk1 = CONTAINING_RECORD(p1, HDR_FREE, Node);
177 HDR_FREE* blk2 = CONTAINING_RECORD(p2, HDR_FREE, Node);
178
179 if (blk1->hdr.Size == blk2->hdr.Size)
180 {
181 if (blk1 < blk2)
182 {
183 return -1;
184 }
185 if (blk1 > blk2)
186 {
187 return 1;
188 }
189 }
190 else
191 {
192 if (blk1->hdr.Size < blk2->hdr.Size)
193 {
194 return -1;
195 }
196 if (blk1->hdr.Size > blk2->hdr.Size)
197 {
198 return 1;
199 }
200 }
201 return 0;
202
203 }
204
205 int compare_value(PVOID value, PNODE p)
206 {
207 HDR_FREE* blk = CONTAINING_RECORD(p, HDR_FREE, Node);
208 ULONG v = *(PULONG)value;
209
210 if (v < blk->hdr.Size)
211 {
212 return -1;
213 }
214 if (v > blk->hdr.Size)
215 {
216 return 1;
217 }
218 return 0;
219 }
220
221 /* avl functions **********************************************************/
222
223 /* FIXME:
224 * The avl functions should be moved into a separate file.
225 */
226
227 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
228
/*
 * Insert node n into the AVL tree at *root using the given ordering
 * function, then restore the AVL balance invariant with at most one
 * single or double rotation.  Based on libavl's pavl (parent-pointer
 * AVL) insertion.
 */
void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
{
   PNODE y;     /* Top node to update balance factor, and parent. */
   PNODE p, q;  /* Iterator, and parent. */
   PNODE w;     /* New root of rebalanced subtree. */
   int dir = 0; /* Direction to descend. */

   n->link[0] = n->link[1] = n->parent = NULL;
   n->balance = 0;

   /* Descend to the insertion point; y tracks the deepest node with a
    * non-zero balance factor, the only place a rotation can be needed. */
   y = *root;
   for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
   {
      dir = compare(n, p) > 0;
      if (p->balance != 0)
      {
         y = p;
      }
   }

   n->parent = q;
   if (q != NULL)
   {
      q->link[dir] = n;
   }
   else
   {
      *root = n;
   }

   /* Tree was empty: nothing to rebalance. */
   if (*root == n)
   {
      return;
   }

   /* Update balance factors on the path from n up to (excluding) y. */
   for (p = n; p != y; p = q)
   {
      q = p->parent;
      dir = q->link[0] != p;
      if (dir == 0)
      {
         q->balance--;
      }
      else
      {
         q->balance++;
      }
   }

   if (y->balance == -2)
   {
      /* Left-heavy violation at y. */
      PNODE x = y->link[0];
      if (x->balance == -1)
      {
         /* Left-left case: single right rotation about y. */
         w = x;
         y->link[0] = x->link[1];
         x->link[1] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
      else
      {
         /* Left-right case: double rotation (left about x, right about y). */
         ASSERT(x->balance == +1);
         w = x->link[1];
         x->link[1] = w->link[0];
         w->link[0] = x;
         y->link[0] = w->link[1];
         w->link[1] = y;
         if (w->balance == -1)
         {
            x->balance = 0;
            y->balance = +1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == +1| */
         {
            x->balance = -1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[1] != NULL)
         {
            x->link[1]->parent = x;
         }
         if (y->link[0] != NULL)
         {
            y->link[0]->parent = y;
         }
      }
   }
   else if (y->balance == +2)
   {
      /* Right-heavy violation at y (mirror image of the case above). */
      PNODE x = y->link[1];
      if (x->balance == +1)
      {
         /* Right-right case: single left rotation about y. */
         w = x;
         y->link[1] = x->link[0];
         x->link[0] = y;
         x->balance = y->balance = 0;
         x->parent = y->parent;
         y->parent = x;
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
      else
      {
         /* Right-left case: double rotation (right about x, left about y). */
         ASSERT(x->balance == -1);
         w = x->link[0];
         x->link[0] = w->link[1];
         w->link[1] = x;
         y->link[1] = w->link[0];
         w->link[0] = y;
         if (w->balance == 1)
         {
            x->balance = 0;
            y->balance = -1;
         }
         else if (w->balance == 0)
         {
            x->balance = y->balance = 0;
         }
         else /* |w->pavl_balance == -1| */
         {
            x->balance = +1;
            y->balance = 0;
         }
         w->balance = 0;
         w->parent = y->parent;
         x->parent = y->parent = w;
         if (x->link[0] != NULL)
         {
            x->link[0]->parent = x;
         }
         if (y->link[1] != NULL)
         {
            y->link[1]->parent = y;
         }
      }
   }
   else
   {
      /* No rotation was needed. */
      return;
   }
   /* Reattach the rotated subtree (now rooted at w) to y's old parent. */
   if (w->parent != NULL)
   {
      w->parent->link[y != w->parent->link[0]] = w;
   }
   else
   {
      *root = w;
   }

   return;
}
395
/*
 * Remove node item from the AVL tree at *root and rebalance on the way
 * back up.  Based on libavl's pavl deletion.
 *
 * NOTE(review): when item is the root, q is set to (PNODE)root, treating
 * the root-pointer variable as a pseudo-node whose link[0] aliases *root.
 * This relies on link[0] being the first member of NODE — confirm that
 * layout assumption stays valid if NODE is ever reordered.
 */
void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
{
   PNODE p; /* Traverses tree to find node to delete. */
   PNODE q; /* Parent of |p|. */
   int dir; /* Side of |q| on which |p| is linked. */

   if (root == NULL || *root == NULL)
   {
      return ;
   }

   p = item;
   q = p->parent;
   if (q == NULL)
   {
      q = (PNODE) root;
      dir = 0;
   }
   else
   {
      dir = compare(p, q) > 0;
   }

   if (p->link[1] == NULL)
   {
      /* No right child: splice p's left subtree into its place. */
      q->link[dir] = p->link[0];
      if (q->link[dir] != NULL)
      {
         q->link[dir]->parent = p->parent;
      }
   }
   else
   {
      PNODE r = p->link[1];
      if (r->link[0] == NULL)
      {
         /* Right child has no left subtree: r replaces p directly. */
         r->link[0] = p->link[0];
         q->link[dir] = r;
         r->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         r->balance = p->balance;
         q = r;
         dir = 1;
      }
      else
      {
         /* General case: p's in-order successor s (leftmost node of the
          * right subtree) replaces p; rebalancing starts at s's old
          * parent r. */
         PNODE s = r->link[0];
         while (s->link[0] != NULL)
         {
            s = s->link[0];
         }
         r = s->parent;
         r->link[0] = s->link[1];
         s->link[0] = p->link[0];
         s->link[1] = p->link[1];
         q->link[dir] = s;
         if (s->link[0] != NULL)
         {
            s->link[0]->parent = s;
         }
         s->link[1]->parent = s;
         s->parent = p->parent;
         if (r->link[0] != NULL)
         {
            r->link[0]->parent = r;
         }
         s->balance = p->balance;
         q = r;
         dir = 0;
      }
   }

   /* Fully detach the removed node. */
   item->link[0] = item->link[1] = item->parent = NULL;
   item->balance = 0;

   /* Walk back up from the removal point, fixing balance factors and
    * rotating where a subtree has become too tall.  Deletion may need
    * a rotation at every level, so the loop runs to the root unless a
    * subtree's height is unchanged (the break cases). */
   while (q != (PNODE) root)
   {
      PNODE y = q;

      if (y->parent != NULL)
      {
         q = y->parent;
      }
      else
      {
         q = (PNODE) root;
      }

      if (dir == 0)
      {
         /* A node was removed from y's left subtree. */
         dir = q->link[0] != y;
         y->balance++;
         if (y->balance == +1)
         {
            break;
         }
         else if (y->balance == +2)
         {
            PNODE x = y->link[1];
            if (x->balance == -1)
            {
               /* Right-left double rotation. */
               PNODE w;

               ASSERT(x->balance == -1);
               w = x->link[0];
               x->link[0] = w->link[1];
               w->link[1] = x;
               y->link[1] = w->link[0];
               w->link[0] = y;
               if (w->balance == +1)
               {
                  x->balance = 0;
                  y->balance = -1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == -1| */
               {
                  x->balance = +1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[0] != NULL)
               {
                  x->link[0]->parent = x;
               }
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* Single left rotation. */
               y->link[1] = x->link[0];
               x->link[0] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[1] != NULL)
               {
                  y->link[1]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  x->balance = -1;
                  y->balance = +1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
      else
      {
         /* A node was removed from y's right subtree (mirror case). */
         dir = q->link[0] != y;
         y->balance--;
         if (y->balance == -1)
         {
            break;
         }
         else if (y->balance == -2)
         {
            PNODE x = y->link[0];
            if (x->balance == +1)
            {
               /* Left-right double rotation. */
               PNODE w;
               ASSERT(x->balance == +1);
               w = x->link[1];
               x->link[1] = w->link[0];
               w->link[0] = x;
               y->link[0] = w->link[1];
               w->link[1] = y;
               if (w->balance == -1)
               {
                  x->balance = 0;
                  y->balance = +1;
               }
               else if (w->balance == 0)
               {
                  x->balance = y->balance = 0;
               }
               else /* |w->pavl_balance == +1| */
               {
                  x->balance = -1;
                  y->balance = 0;
               }
               w->balance = 0;
               w->parent = y->parent;
               x->parent = y->parent = w;
               if (x->link[1] != NULL)
               {
                  x->link[1]->parent = x;
               }
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = w;
            }
            else
            {
               /* Single right rotation. */
               y->link[0] = x->link[1];
               x->link[1] = y;
               x->parent = y->parent;
               y->parent = x;
               if (y->link[0] != NULL)
               {
                  y->link[0]->parent = y;
               }
               q->link[dir] = x;
               if (x->balance == 0)
               {
                  x->balance = +1;
                  y->balance = -1;
                  break;
               }
               else
               {
                  x->balance = y->balance = 0;
                  y = x;
               }
            }
         }
      }
   }

}
635
636 PNODE _cdecl avl_get_first(PNODE root)
637 {
638 PNODE p;
639 if (root == NULL)
640 {
641 return NULL;
642 }
643 p = root;
644 while (p->link[0])
645 {
646 p = p->link[0];
647 }
648 return p;
649 }
650
651 PNODE avl_get_next(PNODE root, PNODE p)
652 {
653 PNODE q;
654 if (p->link[1])
655 {
656 p = p->link[1];
657 while(p->link[0])
658 {
659 p = p->link[0];
660 }
661 return p;
662 }
663 else
664 {
665 q = p->parent;
666 while (q && q->link[1] == p)
667 {
668 p = q;
669 q = q->parent;
670 }
671 if (q == NULL)
672 {
673 return NULL;
674 }
675 else
676 {
677 return q;
678 }
679 }
680 }
681
682 PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
683 {
684 PNODE p;
685 PNODE prev = NULL;
686 int cmp;
687
688 for (p = root; p != NULL;)
689 {
690 cmp = compare((PVOID)&size, p);
691 if (cmp < 0)
692 {
693 prev = p;
694 p = p->link[0];
695 }
696 else if (cmp > 0)
697 {
698 p = p->link[1];
699 }
700 else
701 {
702 while (p->link[0])
703 {
704 cmp = compare((PVOID)&size, p->link[0]);
705 if (cmp != 0)
706 {
707 break;
708 }
709 p = p->link[0];
710 }
711 return p;
712 }
713 }
714 return prev;
715 }
716
717 /* non paged pool functions ************************************************/
718
719 #ifdef TAG_STATISTICS_TRACKING
720 VOID
721 MiRemoveFromTagHashTable(HDR_USED* block)
722 /*
723 * Remove a block from the tag hash table
724 */
725 {
726 if (block->Tag == 0)
727 {
728 return;
729 }
730 RemoveEntryList(&block->TagListEntry);
731 }
732
733 VOID
734 MiAddToTagHashTable(HDR_USED* block)
735 /*
736 * Add a block to the tag hash table
737 */
738 {
739 ULONG hash;
740
741 if (block->Tag == 0)
742 {
743 return;
744 }
745
746 hash = block->Tag % TAG_HASH_TABLE_SIZE;
747
748 InsertHeadList(&tag_hash_table[hash], &block->TagListEntry);
749 }
750 #endif /* TAG_STATISTICS_TRACKING */
751
752 #if defined(TAG_STATISTICS_TRACKING)
753 VOID STATIC
754 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
755 {
756 CHAR c1, c2, c3, c4;
757
758 c1 = (CHAR)((CurrentTag >> 24) & 0xFF);
759 c2 = (CHAR)((CurrentTag >> 16) & 0xFF);
760 c3 = (CHAR)((CurrentTag >> 8) & 0xFF);
761 c4 = (CHAR)(CurrentTag & 0xFF);
762
763 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
764 {
765 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
766 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
767 CurrentSize, CurrentSize / CurrentNrBlocks);
768 }
769 else
770 {
771 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
772 CurrentTag, CurrentNrBlocks, CurrentSize,
773 CurrentSize / CurrentNrBlocks);
774 }
775 }
776 #endif /* defined(TAG_STATISTICS_TRACKING) */
777
/*
 * Dump per-tag allocation statistics for the nonpaged pool.  When
 * NewOnly is TRUE only blocks not shown by a previous dump are counted
 * (each block's Dumped flag is set as it is reported).
 *
 * NOTE(review): no pool lock is taken here, unlike
 * MiDebugDumpNonPagedPool — presumably callers hold it or accept the
 * race; confirm before relying on this from arbitrary context.
 */
VOID
NTAPI
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#if defined(TAG_STATISTICS_TRACKING)
   ULONG i;
   HDR_USED* current;
   ULONG CurrentTag;
   ULONG CurrentNrBlocks = 0;
   ULONG CurrentSize = 0;
   ULONG TotalBlocks;
   ULONG TotalSize;
   ULONG Size;
   LIST_ENTRY tmpListHead;
   PLIST_ENTRY current_entry;

   DbgPrint("******* Dumping non paging pool stats ******\n");
   TotalBlocks = 0;
   TotalSize = 0;
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tmpListHead);

      /* Repeatedly pick the first remaining tag in this bucket,
       * aggregate all entries carrying it, and park them on
       * tmpListHead until the bucket is empty. */
      while (!IsListEmpty(&tag_hash_table[i]))
      {
         CurrentTag = 0;

         current_entry = tag_hash_table[i].Flink;
         while (current_entry != &tag_hash_table[i])
         {
            current = CONTAINING_RECORD(current_entry, HDR_USED, TagListEntry);
            current_entry = current_entry->Flink;
            if (CurrentTag == 0)
            {
               /* First entry of this pass selects the tag to total. */
               CurrentTag = current->Tag;
               CurrentNrBlocks = 0;
               CurrentSize = 0;
            }
            if (current->Tag == CurrentTag)
            {
               RemoveEntryList(&current->TagListEntry);
               InsertHeadList(&tmpListHead, &current->TagListEntry);
               if (!NewOnly || !current->Dumped)
               {
                  CurrentNrBlocks++;
                  TotalBlocks++;
                  CurrentSize += current->hdr.Size;
                  TotalSize += current->hdr.Size;
                  current->Dumped = TRUE;
               }
            }
         }
         if (CurrentTag != 0 && CurrentNrBlocks != 0)
         {
            MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
         }
      }
      /* Move the parked entries back into the (now empty) bucket. */
      if (!IsListEmpty(&tmpListHead))
      {
         tag_hash_table[i].Flink = tmpListHead.Flink;
         tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
         tag_hash_table[i].Blink = tmpListHead.Blink;
         tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
      }
   }
   if (TotalBlocks != 0)
   {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
   }
   else
   {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
   }
   /* Free space excluding the pages that were never committed. */
   Size = EiFreeNonPagedPool - (MiNonPagedPoolLength - MiNonPagedPoolNrOfPages * PAGE_SIZE);
   DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
            EiNrFreeBlocks, Size, EiNrFreeBlocks ? Size / EiNrFreeBlocks : 0);
   DbgPrint("***************** Dump Complete ***************\n");
#endif /* defined(TAG_STATISTICS_TRACKING) */
}
859
/*
 * Dump every allocated nonpaged-pool block (size, tag, allocator
 * return address) under the pool spinlock.  When NewOnly is TRUE,
 * blocks already shown by a previous dump are skipped; each reported
 * block has its Dumped flag set.
 */
VOID
NTAPI
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
#if defined(POOL_DEBUG_APIS)
   HDR_USED* current;
   PLIST_ENTRY current_entry;
   KIRQL oldIrql;

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   DbgPrint("******* Dumping non paging pool contents ******\n");
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      current = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
      if (!NewOnly || !current->Dumped)
      {
         CHAR c1, c2, c3, c4;

         /* Split the tag into characters (c4 is the first/lowest byte). */
         c1 = (CHAR)((current->Tag >> 24) & 0xFF);
         c2 = (CHAR)((current->Tag >> 16) & 0xFF);
         c3 = (CHAR)((current->Tag >> 8) & 0xFF);
         c4 = (CHAR)(current->Tag & 0xFF);

         if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
         {
            DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                     current->hdr.Size, current->Tag, c4, c3, c2, c1,
                     current->Caller);
         }
         else
         {
            DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                     current->hdr.Size, current->Tag, current->Caller);
         }
         current->Dumped = TRUE;
      }
      current_entry = current_entry->Flink;
   }
   DbgPrint("***************** Dump Complete ***************\n");
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
#endif
}
904
905 #ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 * Bugchecks on a bad magic, a block outside the pool region, or more
 * tree nodes than EiNrFreeBlocks claims.
 */
{
   HDR_FREE* current;
   unsigned int blocks_seen=0;
   PNODE p;

   p = avl_get_first(FreeBlockListRoot);

   while(p)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(p, HDR_FREE, Node);
      base_addr = (PVOID)current;

      if (current->hdr.Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      /* The whole block, header through last byte, must lie inside
       * [MiNonPagedPoolStart, MiNonPagedPoolStart + MiNonPagedPoolLength). */
      if (base_addr < MiNonPagedPoolStart ||
            (ULONG_PTR)base_addr + current->hdr.Size > (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->hdr.Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  (ULONG_PTR)MiNonPagedPoolStart+MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      /* A cycle or stray node would make the walk exceed the counter. */
      if (blocks_seen > EiNrFreeBlocks)
      {
         DbgPrint("Too many blocks on free list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }
}
949
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * Bugchecks on a bad magic, a block outside the pool region, more
 * entries than EiNrUsedBlocks claims, or broken Flink/Blink pairing.
 */
{
   HDR_USED* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      PVOID base_addr;

      current = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
      base_addr = (PVOID)current;

      if (current->hdr.Magic != BLOCK_HDR_USED_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  current);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* The whole block must lie inside the pool region. */
      if (base_addr < MiNonPagedPoolStart ||
            ((ULONG_PTR)base_addr+current->hdr.Size) >
            (ULONG_PTR)MiNonPagedPoolStart+MiNonPagedPoolLength)
      {
         DbgPrint("Block %x found outside pool area\n",current);
         DbgPrint("Size %d\n",current->hdr.Size);
         DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
                  (ULONG_PTR)MiNonPagedPoolStart+MiNonPagedPoolLength);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
      {
         DbgPrint("Too many blocks on used list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      /* Forward/backward links must agree (next->Blink == current);
       * the last entry (Flink == list head) is not checked here. */
      if (current->ListEntry.Flink != &UsedBlockListHead &&
            current->ListEntry.Flink->Blink != &current->ListEntry)
      {
         DbgPrint("%s:%d:Break in list (current %x next %x "
                  "current->next->previous %x)\n",
                  __FILE__,__LINE__,current, current->ListEntry.Flink,
                  current->ListEntry.Flink->Blink);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }
}
1002
static void check_duplicates(HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *       blk = block to check
 * NOTE: Bug checks if duplicates are found
 *
 * A block "intersects" blk when its header starts strictly inside
 * blk's range, or when it starts before blk but extends into it.
 * Both the free tree and the used list are scanned.
 */
{
   ULONG_PTR base = (ULONG_PTR)blk;
   ULONG_PTR last = (ULONG_PTR)blk + blk->Size;
   PLIST_ENTRY current_entry;
   PNODE p;
   HDR_FREE* free;
   HDR_USED* used;

   /* Scan every free block for overlap with blk. */
   p = avl_get_first(FreeBlockListRoot);

   while (p)
   {
      free = CONTAINING_RECORD(p, HDR_FREE, Node);
      if (free->hdr.Magic != BLOCK_HDR_FREE_MAGIC)
      {
         DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                  free);
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      if ( (ULONG_PTR)free > base && (ULONG_PTR)free < last )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if  ( (ULONG_PTR)free < base &&
            ((ULONG_PTR)free + free->hdr.Size) > base )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }

   /* Scan every used block for overlap with blk. */
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
   {
      used = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);

      if ( (ULONG_PTR)used > base && (ULONG_PTR)used < last )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }
      if  ( (ULONG_PTR)used < base &&
            ((ULONG_PTR)used + used->hdr.Size) > base )
      {
         DbgPrint("intersecting blocks on list\n");
         KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
      }

      current_entry = current_entry->Flink;
   }

}
1065
1066 static void validate_kernel_pool(void)
1067 /*
1068 * FUNCTION: Checks the integrity of the kernel memory heap
1069 */
1070 {
1071 HDR_FREE* free;
1072 HDR_USED* used;
1073 PLIST_ENTRY current_entry;
1074 PNODE p;
1075
1076 validate_free_list();
1077 validate_used_list();
1078
1079 p = avl_get_first(FreeBlockListRoot);
1080 while (p)
1081 {
1082 free = CONTAINING_RECORD(p, HDR_FREE, Node);
1083 check_duplicates(&free->hdr);
1084 p = avl_get_next(FreeBlockListRoot, p);
1085 }
1086 current_entry = UsedBlockListHead.Flink;
1087 while (current_entry != &UsedBlockListHead)
1088 {
1089 used = CONTAINING_RECORD(current_entry, HDR_USED, ListEntry);
1090 check_duplicates(&used->hdr);
1091 current_entry = current_entry->Flink;
1092 }
1093 }
1094 #endif
1095
1096 #if 0
/* Dead code (inside #if 0): skeleton for returning whole pages of a
 * free block to the system; currently only computes the page-aligned
 * sub-range and bails out when the block spans no complete page. */
STATIC VOID
free_pages(HDR_FREE* blk)
{
   ULONG start;
   ULONG end;

   start = (ULONG_PTR)blk;
   end = (ULONG_PTR)blk + blk->hdr.Size;

   /*
    * If the block doesn't contain a whole page then there is nothing to do
    */
   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
   {
      return;
   }
}
1114 #endif
1115
1116 static void remove_from_used_list(HDR_USED* current)
1117 {
1118 RemoveEntryList(&current->ListEntry);
1119 EiUsedNonPagedPool -= current->hdr.Size;
1120 EiNrUsedBlocks--;
1121 }
1122
1123 static void remove_from_free_list(HDR_FREE* current)
1124 {
1125 DPRINT("remove_from_free_list %d\n", current->hdr.Size);
1126
1127 avl_remove(&FreeBlockListRoot, &current->Node, compare_node);
1128
1129 EiFreeNonPagedPool -= current->hdr.Size;
1130 EiNrFreeBlocks--;
1131 DPRINT("remove_from_free_list done\n");
1132 #ifdef DUMP_AVL
1133
1134 DumpFreeBlockTree();
1135 #endif
1136 }
1137
static void
add_to_free_list(HDR_FREE* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 * Coalesces blk with a physically adjacent preceding and/or following
 * free block before inserting the result into the free tree.
 */
{
   HDR_FREE* current;
   BOOL UpdatePrevPtr = FALSE;

   DPRINT("add_to_free_list (%d)\n", blk->hdr.Size);

   EiNrFreeBlocks++;

   /* Merge with the preceding block when it is free. */
   current = (HDR_FREE*)blk->hdr.previous;
   if (current && current->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      current->hdr.Size = current->hdr.Size + blk->hdr.Size;
      /* Temporarily mark as used; the final FREE magic is set below
       * once all merging is done. */
      current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
      memset(blk, 0xcc, HDR_USED_SIZE);   /* poison the absorbed header */
      blk = current;
      UpdatePrevPtr = TRUE;
   }

   /* Merge with the following block when it exists and is free. */
   current = (HDR_FREE*)((ULONG_PTR)blk + blk->hdr.Size);
   if ((char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength &&
         current->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
   {
      remove_from_free_list(current);
      blk->hdr.Size += current->hdr.Size;
      memset(current, 0xcc, HDR_FREE_SIZE);   /* poison the absorbed header */
      UpdatePrevPtr = TRUE;
      current = (HDR_FREE*)((ULONG_PTR)blk + blk->hdr.Size);
   }
   /* After merging, the next physical block's back pointer must point
    * at the (possibly relocated/grown) coalesced block. */
   if (UpdatePrevPtr &&
         (char*)current < (char*)MiNonPagedPoolStart + MiNonPagedPoolLength)
   {
      current->hdr.previous = &blk->hdr;
   }
   DPRINT("%d\n", blk->hdr.Size);
   blk->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   EiFreeNonPagedPool += blk->hdr.Size;
   avl_insert(&FreeBlockListRoot, &blk->Node, compare_node);
   DPRINT("add_to_free_list done\n");
#ifdef DUMP_AVL

   DumpFreeBlockTree();
#endif
}
1187
1188 static void add_to_used_list(HDR_USED* blk)
1189 /*
1190 * FUNCTION: add the block to the used list (internal)
1191 */
1192 {
1193 InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
1194 EiUsedNonPagedPool += blk->hdr.Size;
1195 EiNrUsedBlocks++;
1196 }
1197
1198
/*
 * Commit (allocate and map) any not-yet-committed pool pages between
 * blk's data area and end, in runs of up to 32 pages, updating the
 * allocation bitmap and page counter.  Returns FALSE (after releasing
 * any pages already requested for the current run) when a page cannot
 * be obtained or mapped.
 */
static BOOLEAN
grow_block(HDR_FREE* blk, PVOID end)
{
   NTSTATUS Status;
   PFN_TYPE Page[32];
   ULONG_PTR StartIndex, EndIndex;
   ULONG i, j, k;

   /* Page indices (relative to the pool start) of the first page fully
    * past blk's header and the first page past end. */
   StartIndex = (ULONG_PTR)(PAGE_ROUND_UP((ULONG_PTR)blk + HDR_FREE_SIZE - (ULONG_PTR)MiNonPagedPoolStart)) / PAGE_SIZE;
   EndIndex = ((ULONG_PTR)PAGE_ROUND_UP(end) - (ULONG_PTR)MiNonPagedPoolStart) / PAGE_SIZE;


   for (i = StartIndex; i < EndIndex; i++)
   {
      if (!(MiNonPagedPoolAllocMap[i / 32] & (1 << (i % 32))))
      {
         /* Find the end of this uncommitted run (max 32 pages, the
          * capacity of the Page[] array). */
         for (j = i + 1; j < EndIndex && j - i < 32; j++)
         {
            if (MiNonPagedPoolAllocMap[j / 32] & (1 << (j % 32)))
            {
               break;
            }
         }
         /* Request one physical page per page in the run.  On failure
          * release what was obtained so far and bail out; i is reused
          * here, but the function returns immediately afterwards. */
         for (k = 0; k < j - i; k++)
         {
            Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page[k]);
            if (!NT_SUCCESS(Status))
            {
               for (i = 0; i < k; i++)
               {
                  MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
               }
               return FALSE;
            }
         }
         /* Map the whole run (k == j - i pages) at its pool address. */
         Status = MmCreateVirtualMapping(NULL,
                                         (PVOID)((ULONG_PTR)MiNonPagedPoolStart + i * PAGE_SIZE),
                                         PAGE_READWRITE|PAGE_SYSTEM,
                                         Page,
                                         k);
         if (!NT_SUCCESS(Status))
         {
            for (i = 0; i < k; i++)
            {
               MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
            }
            return FALSE;
         }
         /* Mark the run as committed and advance past it. */
         for (j = i; j < k + i; j++)
         {
            MiNonPagedPoolAllocMap[j / 32] |= (1 << (j % 32));
         }
         MiNonPagedPoolNrOfPages += k;
         i += k - 1;
      }
   }
   return TRUE;
}
1257
/*
 * Core allocator: find a free block that can hold size bytes at the
 * requested alignment (0 = default pool alignment), split off any
 * leading/trailing remainder back onto the free tree, commit the
 * needed pages, and return the block's used header (or NULL when no
 * fit exists or pages cannot be committed).
 *
 * Sub-page allocations are additionally constrained to not cross a
 * page boundary.
 */
static HDR_USED* get_block(unsigned int size, unsigned long alignment)
{
   HDR_FREE *blk, *current, *previous = NULL, *next = NULL, *best = NULL;
   ULONG previous_size = 0, current_size, next_size = 0, new_size;
   PVOID end;
   PVOID addr, aligned_addr, best_aligned_addr=NULL;
   PNODE p;

   DPRINT("get_block %d\n", size);

   /* Start from the smallest free block that could possibly fit and
    * walk upward in size order looking for the best usable fit. */
   p = avl_find_equal_or_greater(FreeBlockListRoot, size + HDR_USED_SIZE, compare_value);
   while (p)
   {
      current = CONTAINING_RECORD(p, HDR_FREE, Node);
      addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
      /* calculate first aligned address available within this block */
      aligned_addr = alignment > 0 ? MM_ROUND_UP(addr, alignment) : addr;
      if (size < PAGE_SIZE)
      {
         /* check that the block is in one page */
         if (PAGE_ROUND_DOWN(aligned_addr) != PAGE_ROUND_DOWN((ULONG_PTR)aligned_addr + size - 1))
         {
            aligned_addr = (PVOID)PAGE_ROUND_UP(aligned_addr);
         }
      }
      DPRINT("%x %x\n", addr, aligned_addr);
      if (aligned_addr != addr)
      {
         /* The gap before the aligned address must be large enough to
          * host a free-block header for the split-off prefix.
          * NOTE(review): the loop body recomputes aligned_addr from
          * fixed inputs, so it converges after at most one pass (or the
          * condition was already false); it is not a true iteration. */
         while((ULONG_PTR)aligned_addr - (ULONG_PTR)addr < HDR_FREE_SIZE)
         {
            if (alignment == 0)
            {
               aligned_addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE + HDR_FREE_SIZE);
            }
            else
            {
               aligned_addr = MM_ROUND_UP((PVOID)((ULONG_PTR)current + HDR_USED_SIZE + HDR_FREE_SIZE), alignment);
            }
            if (size < PAGE_SIZE)
            {
               /* check that the block is in one page */
               if (PAGE_ROUND_DOWN(aligned_addr) != PAGE_ROUND_DOWN((ULONG_PTR)aligned_addr + size - 1))
               {
                  aligned_addr = (PVOID)PAGE_ROUND_UP(aligned_addr);
               }
            }
         }
      }
      DPRINT("%x %x\n", addr, aligned_addr);
      /* Effective size needed from this block, alignment gap included. */
      new_size = (ULONG_PTR)aligned_addr - (ULONG_PTR)addr + size;
      if (current->hdr.Size >= new_size + HDR_USED_SIZE &&
            (best == NULL || current->hdr.Size < best->hdr.Size))
      {
         best = current;
         best_aligned_addr = aligned_addr;
         /* Good enough fit: waste is bounded, stop searching. */
         if (new_size <= size + 2 * HDR_FREE_SIZE)
         {
            break;
         }
      }

      /* Once some fit is known, stop as soon as blocks get so large
       * that further candidates cannot be better. */
      if (best)
      {
         if (size < PAGE_SIZE)
         {
            if (current->hdr.Size >= 2 * PAGE_SIZE + HDR_FREE_SIZE)
            {
               break;
            }
         }
         else
         {
            if (current->hdr.Size >= size + alignment + HDR_FREE_SIZE)
            {
               break;
            }
         }
      }
      p = avl_get_next(FreeBlockListRoot, p);
   }
   /*
    * We didn't find anything suitable at all.
    */
   if (best == NULL)
   {
      return NULL;
   }

   DPRINT(":: blk %x blk->hdr.Size %d (%d) Size %d\n", best, best->hdr.Size, best->hdr.Size - HDR_USED_SIZE, size);

   current = best;
   current_size = current->hdr.Size - HDR_USED_SIZE;
   addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
   if (addr != best_aligned_addr)
   {
      blk = (HDR_FREE*)((ULONG_PTR)best_aligned_addr - HDR_USED_SIZE);
      /*
       * if size-aligned, break off the preceding bytes into their own block...
       */
      previous = current;
      previous_size = (ULONG_PTR)blk - (ULONG_PTR)previous - HDR_FREE_SIZE;
      current = blk;
      current_size -= ((ULONG_PTR)current - (ULONG_PTR)previous);
   }
   end = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE + size);

   if (current_size >= size + HDR_FREE_SIZE + MM_POOL_ALIGNMENT)
   {
      /* create a new free block after our block, if the memory size is >= 4 byte for this block */
      next = (HDR_FREE*)((ULONG_PTR)current + size + HDR_USED_SIZE);
      next_size = current_size - size - HDR_FREE_SIZE;
      current_size = size;
      end = (PVOID)((ULONG_PTR)next + HDR_FREE_SIZE);
   }

   if (previous)
   {
      /* The chosen data address was past the block start: commit pages,
       * carve the used block out of the middle, and return the prefix
       * to the free tree. */
      remove_from_free_list(previous);
      if (!grow_block(previous, end))
      {
         add_to_free_list(previous);
         return NULL;
      }
      memset(current, 0, HDR_USED_SIZE);
      current->hdr.Size = current_size + HDR_USED_SIZE;
      current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
      current->hdr.previous = &previous->hdr;
      previous->hdr.Size = previous_size + HDR_FREE_SIZE;
      if (next == NULL)
      {
         /* Fix the physical successor's back pointer. */
         blk = (HDR_FREE*)((ULONG_PTR)current + current->hdr.Size);
         if ((ULONG_PTR)blk < (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
         {
            blk->hdr.previous = &current->hdr;
         }
      }

      add_to_free_list(previous);
   }
   else
   {
      /* Allocation starts right at the block: take the whole block off
       * the free tree and convert its header in place. */
      remove_from_free_list(current);

      if (!grow_block(current, end))
      {
         add_to_free_list(current);
         return NULL;
      }
      current->hdr.Magic = BLOCK_HDR_USED_MAGIC;
      if (next)
      {
         current->hdr.Size = current_size + HDR_USED_SIZE;
      }
   }

   if (next)
   {
      /* Build the trailing remainder as a fresh free block. */
      memset(next, 0, HDR_FREE_SIZE);
      next->hdr.Size = next_size + HDR_FREE_SIZE;
      next->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
      next->hdr.previous = &current->hdr;
      blk = (HDR_FREE*)((ULONG_PTR)next + next->hdr.Size);
      if ((ULONG_PTR)blk < (ULONG_PTR)MiNonPagedPoolStart + MiNonPagedPoolLength)
      {
         blk->hdr.previous = &next->hdr;
      }
      add_to_free_list(next);
   }

   add_to_used_list((HDR_USED*)current);
   VALIDATE_POOL;

   /* Post-conditions: sub-page allocations stay within one page and
    * the returned address honours the requested alignment. */
   if (size < PAGE_SIZE)
   {
      addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
      if (PAGE_ROUND_DOWN(addr) != PAGE_ROUND_DOWN((PVOID)((ULONG_PTR)addr + size - 1)))
      {
         DPRINT1("%x %x\n", addr, (ULONG_PTR)addr + size);
      }
      ASSERT (PAGE_ROUND_DOWN(addr) == PAGE_ROUND_DOWN((PVOID)((ULONG_PTR)addr + size - 1)));
   }
   if (alignment)
   {
      addr = (PVOID)((ULONG_PTR)current + HDR_USED_SIZE);
      ASSERT(MM_ROUND_UP(addr, alignment) == addr);
   }
   return (HDR_USED*)current;
}
1446
1447 ULONG STDCALL
1448 ExRosQueryNonPagedPoolTag ( PVOID Addr )
1449 {
1450 HDR_USED* blk=(HDR_USED*)((ULONG_PTR)Addr - HDR_USED_SIZE);
1451 if (blk->hdr.Magic != BLOCK_HDR_USED_MAGIC)
1452 KEBUGCHECK(0);
1453
1454 return blk->Tag;
1455 }
1456
1457 VOID STDCALL ExFreeNonPagedPool (PVOID block)
1458 /*
1459 * FUNCTION: Releases previously allocated memory
1460 * ARGUMENTS:
1461 * block = block to free
1462 */
1463 {
1464 HDR_USED* blk=(HDR_USED*)((ULONG_PTR)block - HDR_USED_SIZE);
1465 KIRQL oldIrql;
1466
1467 if (block == NULL)
1468 {
1469 return;
1470 }
1471
1472 DPRINT("freeing block %x\n",blk);
1473
1474 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->hdr.Size,
1475 blk->Caller);
1476 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1477
1478 VALIDATE_POOL;
1479 if (blk->hdr.Magic != BLOCK_HDR_USED_MAGIC)
1480 {
1481 if (blk->hdr.Magic == BLOCK_HDR_FREE_MAGIC)
1482 {
1483 DbgPrint("ExFreePool of already freed address %x\n", block);
1484 }
1485 else
1486 {
1487 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1488 block, blk->hdr.Magic);
1489 }
1490 KEBUGCHECK(0);
1491 return;
1492 }
1493 memset(block, 0xcc, blk->hdr.Size - HDR_USED_SIZE);
1494 #ifdef TAG_STATISTICS_TRACKING
1495
1496 MiRemoveFromTagHashTable(blk);
1497 #endif
1498
1499 remove_from_used_list(blk);
1500 blk->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
1501 add_to_free_list((HDR_FREE*)blk);
1502 VALIDATE_POOL;
1503 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1504 }
1505
1506 PVOID STDCALL
1507 ExAllocateNonPagedPoolWithTag(POOL_TYPE Type, ULONG Size, ULONG Tag, PVOID Caller)
1508 {
1509 PVOID block;
1510 HDR_USED* best = NULL;
1511 KIRQL oldIrql;
1512 ULONG alignment;
1513
1514 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1515 Size,Caller);
1516
1517 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1518
1519 VALIDATE_POOL;
1520
1521 #if 0
1522 /* after some allocations print the npaged pool stats */
1523 #ifdef TAG_STATISTICS_TRACKING
1524
1525 {
1526 static ULONG counter = 0;
1527 if (counter++ % 100000 == 0)
1528 {
1529 MiDebugDumpNonPagedPoolStats(FALSE);
1530 }
1531 }
1532 #endif
1533 #endif
1534 /*
1535 * accomodate this useful idiom
1536 */
1537 if (Size == 0)
1538 {
1539 POOL_TRACE("= NULL\n");
1540 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1541 return(NULL);
1542 }
1543 /* Make the size dword alligned, this makes the block dword alligned */
1544 Size = ROUND_UP(Size, MM_POOL_ALIGNMENT);
1545
1546 if (Size >= PAGE_SIZE)
1547 {
1548 alignment = PAGE_SIZE;
1549 }
1550 else if (Type & CACHE_ALIGNED_POOL_MASK)
1551 {
1552 alignment = MM_CACHE_LINE_SIZE;
1553 }
1554 else
1555 {
1556 alignment = 0;
1557 }
1558
1559 best = get_block(Size, alignment);
1560 if (best == NULL)
1561 {
1562 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1563 DPRINT1("Trying to allocate %lu bytes from nonpaged pool - nothing suitable found, returning NULL\n",
1564 Size );
1565 return NULL;
1566 }
1567 best->Tag = Tag;
1568 best->Caller = Caller;
1569 best->Dumped = FALSE;
1570 best->TagListEntry.Flink = best->TagListEntry.Blink = NULL;
1571 #ifdef TAG_STATISTICS_TRACKING
1572
1573 MiAddToTagHashTable(best);
1574 #endif
1575
1576 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1577 block = (PVOID)((ULONG_PTR)best + HDR_USED_SIZE);
1578 /* RtlZeroMemory(block, Size);*/
1579 return(block);
1580 }
1581
/*
 * One-time initialization of the kernel nonpaged pool.  Sets up the
 * pool lock, the bookkeeping lists and the free-block tree, maps the
 * initial pages, then lays the pool region out as three blocks:
 *   1. a small free block at the very start,
 *   2. a used block holding the page-allocation bitmap,
 *   3. one large free block covering the rest of the pool range.
 */
VOID
INIT_FUNCTION
NTAPI
MiInitializeNonPagedPool(VOID)
{
   NTSTATUS Status;
   PFN_TYPE Page;
   ULONG i;
   PVOID Address;
   HDR_USED* used;
   HDR_FREE* free;
#ifdef TAG_STATISTICS_TRACKING

   /* Empty every bucket of the per-tag statistics hash table. */
   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
   {
      InitializeListHead(&tag_hash_table[i]);
   }
#endif
   /* Pool-wide spinlock plus the used-block list, the address list and
    * the (initially empty) AVL tree of free blocks. */
   KeInitializeSpinLock(&MmNpoolLock);
   InitializeListHead(&UsedBlockListHead);
   InitializeListHead(&AddressListHead);
   FreeBlockListRoot = NULL;

   /* The page-allocation bitmap lives exactly one page into the pool
    * region; the block layout below is arranged to make its payload
    * land at this address. */
   MiNonPagedPoolAllocMap = (PVOID)((ULONG_PTR)MiNonPagedPoolStart + PAGE_SIZE);
   /* Pages to map up front: the bitmap (one bit per pool page, rounded
    * up to 32-bit words) plus a header, rounded to whole pages, plus the
    * leading page.  NOTE(review): PAGE_ROUND_UP is applied before the
    * extra PAGE_SIZE is added -- confirm this matches the layout below. */
   MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + HDR_FREE_SIZE) + PAGE_SIZE;
   MiNonPagedPoolNrOfPages /= PAGE_SIZE;
   Address = MiNonPagedPoolStart;

   /* Commit physical pages and map them at the start of the pool range,
    * one page at a time; any failure here is fatal. */
   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to allocate a page\n");
         KEBUGCHECK(0);
      }

      Status = MmCreateVirtualMapping(NULL,
                                      Address,
                                      PAGE_READWRITE|PAGE_SYSTEM,
                                      &Page,
                                      1);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("Unable to create virtual mapping\n");
         KEBUGCHECK(0);
      }
      Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
   }

   /* Mark the pages just mapped as in use in the allocation bitmap. */
   for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
   {
      MiNonPagedPoolAllocMap[i / 32] |= (1 << (i % 32));
   }

   /* the first block is free */
   free = (HDR_FREE*)MiNonPagedPoolStart;
   free->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   /* Sized with HDR_USED_SIZE (not HDR_FREE_SIZE) so that the used
    * block that follows has its payload land exactly on
    * MiNonPagedPoolAllocMap, one page into the pool. */
   free->hdr.Size = PAGE_SIZE - HDR_USED_SIZE;
   free->hdr.previous = NULL;
   /* Poison the free payload with the debug fill pattern. */
   memset((PVOID)((ULONG_PTR)free + HDR_FREE_SIZE), 0xcc, free->hdr.Size - HDR_FREE_SIZE);
   add_to_free_list(free);

   /* the second block contains the non paged pool bitmap */
   used = (HDR_USED*)((ULONG_PTR)free + free->hdr.Size);
   used->hdr.Magic = BLOCK_HDR_USED_MAGIC;
   used->hdr.Size = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + HDR_USED_SIZE;
   used->hdr.previous = &free->hdr;
   used->Tag = 0xffffffff;
   used->Caller = 0;
   used->Dumped = FALSE;
   add_to_used_list(used);
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(used);
#endif

   /* the third block is the free block after the bitmap */
   free = (HDR_FREE*)((ULONG_PTR)used + used->hdr.Size);
   free->hdr.Magic = BLOCK_HDR_FREE_MAGIC;
   /* This block spans everything from here to the end of the pool
    * range; only the mapped part (up to Address) is poisoned. */
   free->hdr.Size = MiNonPagedPoolLength - ((ULONG_PTR)free - (ULONG_PTR)MiNonPagedPoolStart);
   free->hdr.previous = &used->hdr;
   memset((PVOID)((ULONG_PTR)free + HDR_FREE_SIZE), 0x0cc, (ULONG_PTR)Address - (ULONG_PTR)free - HDR_FREE_SIZE);
   add_to_free_list(free);
}
1666
/*
 * Special-pool allocation entry point.  Special pool (guard-page backed
 * allocations for catching overruns/underruns) is not implemented in
 * this allocator, so every request logs a diagnostic and fails with
 * NULL.  The PoolType/NumberOfBytes/Tag/Underrun arguments are ignored.
 */
PVOID
STDCALL
MiAllocateSpecialPool  (IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes,
                        IN ULONG Tag,
                        IN ULONG Underrun
                        )
{
   /* FIXME: Special Pools not Supported */
   DbgPrint("Special Pools not supported\n");
   return NULL;
}
1679
1680 /* EOF */