another patch by Mike Nordell. Fixed pointer math to be AMD64 compatible per David...
[reactos.git] / reactos / ntoskrnl / mm / npool.c
1 /* $Id: npool.c,v 1.73 2003/07/29 19:43:13 royce Exp $
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
8 * UPDATE HISTORY:
9 * 27/05/98: Created
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
13 * in ExFreePool
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
15 */
16
17 /* INCLUDES ****************************************************************/
18
19 #include <ddk/ntddk.h>
20 #include <internal/mm.h>
21 #include <internal/ntoskrnl.h>
22 #include <internal/pool.h>
23
24 #define NDEBUG
25 #include <internal/debug.h>
26
27 /* Enable strict checking of the nonpaged pool on every allocation */
28 //#define ENABLE_VALIDATE_POOL
29
30 /* Enable tracking of statistics about the tagged blocks in the pool */
31 #define TAG_STATISTICS_TRACKING
32
33 /*
34 * Put each block in its own range of pages and position the block at the
35 * end of the range so any accesses beyond the end of block are to invalid
36 * memory locations.
37 */
38 //#define WHOLE_PAGE_ALLOCATIONS
39
40 #ifdef ENABLE_VALIDATE_POOL
41 #define VALIDATE_POOL validate_kernel_pool()
42 #else
43 #define VALIDATE_POOL
44 #endif
45
46 #if 0
47 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
48 #else
49 #define POOL_TRACE(args...)
50 #endif
51
52 /* avl types ****************************************************************/
53
54 /* FIXME:
55 * This declarations should be moved into a separate header file.
56 */
57
/*
 * AVL tree node, embedded in BLOCK_HDR. The free-block tree is keyed on
 * (Size, address) -- see compare_node().
 */
typedef struct _NODE
{
  struct _NODE* link[2];   /* link[0] = left child, link[1] = right child */
  struct _NODE* parent;    /* NULL for the tree root */
  signed char balance;     /* AVL balance factor: height(right) - height(left), in [-2, +2] transiently */
} NODE, *PNODE;
64
65 /* TYPES *******************************************************************/
66
67 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
68 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
69
70 /*
71 * fields present at the start of a block (this is for internal use only)
72 */
/*
 * fields present at the start of a block (this is for internal use only)
 *
 * Every pool block (free or allocated) is preceded by this header; the
 * usable payload starts immediately after it (see block_to_address()).
 */
typedef struct _BLOCK_HDR
{
  ULONG Magic;             /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
  ULONG Size;              /* payload size in bytes, excluding this header */
  LIST_ENTRY ListEntry;    /* links used blocks into UsedBlockListHead */
  LIST_ENTRY AddressList;  /* links ALL blocks in address order (AddressListHead) */
  LIST_ENTRY TagListEntry; /* links used blocks into tag_hash_table[Tag % size] */
  NODE Node;               /* AVL node; only meaningful while the block is free */
  ULONG Tag;               /* four-character pool tag, 0 if untagged */
  PVOID Caller;            /* return address of the allocator's caller */
  BOOLEAN Dumped;          /* set once reported by the debug dump routines */
} BLOCK_HDR;
85
86 PVOID STDCALL
87 ExAllocateWholePageBlock(ULONG Size);
88 VOID STDCALL
89 ExFreeWholePageBlock(PVOID Addr);
90
91 /* GLOBALS *****************************************************************/
92
93 extern PVOID MiNonPagedPoolStart;
94 extern ULONG MiNonPagedPoolLength;
95 static ULONG MiCurrentNonPagedPoolLength = 0;
96
97 /*
98 * Head of the list of free blocks
99 */
100 static PNODE FreeBlockListRoot = NULL;
101
102 /*
103 * Head of the list of in use block
104 */
105 static LIST_ENTRY UsedBlockListHead;
106
107 static LIST_ENTRY AddressListHead;
108
109 #ifndef WHOLE_PAGE_ALLOCATIONS
110 /*
111 * Count of free blocks
112 */
113 static ULONG EiNrFreeBlocks = 0;
114
115 /*
116 * Count of used blocks
117 */
118 static ULONG EiNrUsedBlocks = 0;
119 #endif
120
121 /*
122 * Lock that protects the non-paged pool data structures
123 */
124 static KSPIN_LOCK MmNpoolLock;
125
126 /*
127 * Total memory used for free nonpaged pool blocks
128 */
129 ULONG EiFreeNonPagedPool = 0;
130
131 /*
132 * Total memory used for nonpaged pool blocks
133 */
134 ULONG EiUsedNonPagedPool = 0;
135
136 /*
137 * Allocate a range of memory in the nonpaged pool
138 */
139 PVOID
140 MiAllocNonPagedPoolRegion(unsigned int nr_pages);
141
142 VOID
143 MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);
144
145 #ifdef TAG_STATISTICS_TRACKING
146 #define TAG_HASH_TABLE_SIZE (1024)
147 static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
148 #endif /* TAG_STATISTICS_TRACKING */
149
150 /* avl helper functions ****************************************************/
151
152 void DumpFreeBlockNode(PNODE p)
153 {
154 static int count = 0;
155 BLOCK_HDR* blk;
156
157 count++;
158
159 if (p)
160 {
161 DumpFreeBlockNode(p->link[0]);
162 blk = CONTAINING_RECORD(p, BLOCK_HDR, Node);
163 DbgPrint("%08x %8d (%d)\n", blk, blk->Size, count);
164 DumpFreeBlockNode(p->link[1]);
165 }
166
167 count--;
168 }
169 void DumpFreeBlockTree(void)
170 {
171 DbgPrint("--- Begin tree ------------------\n");
172 DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, BLOCK_HDR, Node));
173 DumpFreeBlockNode(FreeBlockListRoot);
174 DbgPrint("--- End tree --------------------\n");
175 }
176
177 int compare_node(PNODE p1, PNODE p2)
178 {
179 BLOCK_HDR* blk1 = CONTAINING_RECORD(p1, BLOCK_HDR, Node);
180 BLOCK_HDR* blk2 = CONTAINING_RECORD(p2, BLOCK_HDR, Node);
181
182 if (blk1->Size == blk2->Size)
183 {
184 if (blk1 < blk2)
185 {
186 return -1;
187 }
188 if (blk1 > blk2)
189 {
190 return 1;
191 }
192 }
193 else
194 {
195 if (blk1->Size < blk2->Size)
196 {
197 return -1;
198 }
199 if (blk1->Size > blk2->Size)
200 {
201 return 1;
202 }
203 }
204 return 0;
205
206 }
207
208 int compare_value(PVOID value, PNODE p)
209 {
210 BLOCK_HDR* blk = CONTAINING_RECORD(p, BLOCK_HDR, Node);
211 ULONG v = *(PULONG)value;
212
213 if (v < blk->Size)
214 {
215 return -1;
216 }
217 if (v > blk->Size)
218 {
219 return 1;
220 }
221 return 0;
222 }
223
224 /* avl functions **********************************************************/
225
226 /* FIXME:
227 * The avl functions should be moved into a separate file.
228 */
229
230 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
231
/*
 * Insert node 'n' into the AVL tree at '*root', ordered by 'compare',
 * then restore the AVL balance invariant with at most one single or
 * double rotation. Adapted from libavl's pavl_insert (parent-pointer
 * variant); duplicates are placed in the right subtree (compare > 0).
 */
void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
{
  PNODE y;             /* Top node to update balance factor, and parent. */
  PNODE p, q;          /* Iterator, and parent. */
  PNODE w;             /* New root of rebalanced subtree. */
  int dir;             /* Direction to descend. */

  n->link[0] = n->link[1] = n->parent = NULL;
  n->balance = 0;

  /* Descend to the insertion point; remember in 'y' the deepest node with
     a non-zero balance factor (the only place a rotation can be needed). */
  y = *root;
  for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
    {
      dir = compare(n, p) > 0;
      if (p->balance != 0)
        {
          y = p;
        }
    }

  n->parent = q;
  if (q != NULL)
    {
      q->link[dir] = n;
    }
  else
    {
      *root = n;
    }

  /* First node in the tree: nothing to rebalance. */
  if (*root == n)
    {
      return;
    }

  /* Update balance factors on the path from n up to (but excluding) y. */
  for (p = n; p != y; p = q)
    {
      q = p->parent;
      dir = q->link[0] != p;
      if (dir == 0)
        {
          q->balance--;
        }
      else
        {
          q->balance++;
        }
    }

  if (y->balance == -2)
    {
      /* Left-heavy violation at y. */
      PNODE x = y->link[0];
      if (x->balance == -1)
        {
          /* Single right rotation. */
          w = x;
          y->link[0] = x->link[1];
          x->link[1] = y;
          x->balance = y->balance = 0;
          x->parent = y->parent;
          y->parent = x;
          if (y->link[0] != NULL)
            {
              y->link[0]->parent = y;
            }
        }
      else
        {
          /* Double rotation: left around x, then right around y. */
          assert (x->balance == +1);
          w = x->link[1];
          x->link[1] = w->link[0];
          w->link[0] = x;
          y->link[0] = w->link[1];
          w->link[1] = y;
          if (w->balance == -1)
            {
              x->balance = 0;
              y->balance = +1;
            }
          else if (w->balance == 0)
            {
              x->balance = y->balance = 0;
            }
          else /* |w->pavl_balance == +1| */
            {
              x->balance = -1;
              y->balance = 0;
            }
          w->balance = 0;
          w->parent = y->parent;
          x->parent = y->parent = w;
          if (x->link[1] != NULL)
            {
              x->link[1]->parent = x;
            }
          if (y->link[0] != NULL)
            {
              y->link[0]->parent = y;
            }
        }
    }
  else if (y->balance == +2)
    {
      /* Right-heavy violation at y (mirror image of the case above). */
      PNODE x = y->link[1];
      if (x->balance == +1)
        {
          /* Single left rotation. */
          w = x;
          y->link[1] = x->link[0];
          x->link[0] = y;
          x->balance = y->balance = 0;
          x->parent = y->parent;
          y->parent = x;
          if (y->link[1] != NULL)
            {
              y->link[1]->parent = y;
            }
        }
      else
        {
          /* Double rotation: right around x, then left around y. */
          assert (x->balance == -1);
          w = x->link[0];
          x->link[0] = w->link[1];
          w->link[1] = x;
          y->link[1] = w->link[0];
          w->link[0] = y;
          if (w->balance == 1)
            {
              x->balance = 0;
              y->balance = -1;
            }
          else if (w->balance == 0)
            {
              x->balance = y->balance = 0;
            }
          else /* |w->pavl_balance == -1| */
            {
              x->balance = +1;
              y->balance = 0;
            }
          w->balance = 0;
          w->parent = y->parent;
          x->parent = y->parent = w;
          if (x->link[0] != NULL)
            {
              x->link[0]->parent = x;
            }
          if (y->link[1] != NULL)
            {
              y->link[1]->parent = y;
            }
        }
    }
  else
    {
      /* Tree stayed balanced; no rotation performed. */
      return;
    }
  /* Reattach the rebalanced subtree 'w' to y's former parent (or the root). */
  if (w->parent != NULL)
    {
      w->parent->link[y != w->parent->link[0]] = w;
    }
  else
    {
      *root = w;
    }

  return;
}
398
/*
 * Remove 'item' (which must already be in the tree) from the AVL tree at
 * '*root' and rebalance. Adapted from libavl's pavl_delete (parent-pointer
 * variant). On return 'item' has its links and balance factor cleared.
 *
 * Note the (PNODE)root trick: the root pointer itself is treated as a
 * pseudo-node whose link[0] is *root, so the deletion/rebalance loop does
 * not need a special case for the tree root.
 */
void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
{
  PNODE p;             /* Traverses tree to find node to delete. */
  PNODE q;             /* Parent of |p|. */
  int dir;             /* Side of |q| on which |p| is linked. */

  if (root == NULL || *root == NULL)
    {
      return ;
    }

  p = item;
  q = p->parent;
  if (q == NULL)
    {
      q = (PNODE) root;  /* pseudo-parent for the root node */
      dir = 0;
    }
  else
    {
      dir = compare(p, q) > 0;
    }

  if (p->link[1] == NULL)
    {
      /* Case 1: no right child -- splice in the left subtree. */
      q->link[dir] = p->link[0];
      if (q->link[dir] != NULL)
        {
          q->link[dir]->parent = p->parent;
        }
    }
  else
    {
      PNODE r = p->link[1];
      if (r->link[0] == NULL)
        {
          /* Case 2: right child has no left child -- r replaces p. */
          r->link[0] = p->link[0];
          q->link[dir] = r;
          r->parent = p->parent;
          if (r->link[0] != NULL)
            {
              r->link[0]->parent = r;
            }
          r->balance = p->balance;
          q = r;
          dir = 1;
        }
      else
        {
          /* Case 3: replace p by its in-order successor s (leftmost node
             of the right subtree); rebalancing starts at s's old parent r. */
          PNODE s = r->link[0];
          while (s->link[0] != NULL)
            {
              s = s->link[0];
            }
          r = s->parent;
          r->link[0] = s->link[1];
          s->link[0] = p->link[0];
          s->link[1] = p->link[1];
          q->link[dir] = s;
          if (s->link[0] != NULL)
            {
              s->link[0]->parent = s;
            }
          s->link[1]->parent = s;
          s->parent = p->parent;
          if (r->link[0] != NULL)
            {
              r->link[0]->parent = r;
            }
          s->balance = p->balance;
          q = r;
          dir = 0;
        }
    }

  /* Detach the removed node completely. */
  item->link[0] = item->link[1] = item->parent = NULL;
  item->balance = 0;

  /* Walk back up from q, adjusting balance factors and rotating where the
     AVL invariant was broken; stop early once a subtree's height is
     unchanged (balance becomes +/-1, or a rotation absorbs the change). */
  while (q != (PNODE) root)
    {
      PNODE y = q;

      if (y->parent != NULL)
        {
          q = y->parent;
        }
      else
        {
          q = (PNODE) root;
        }

      if (dir == 0)
        {
          /* Deleted from y's left subtree: y leans further right. */
          dir = q->link[0] != y;
          y->balance++;
          if (y->balance == +1)
            {
              break;
            }
          else if (y->balance == +2)
            {
              PNODE x = y->link[1];
              if (x->balance == -1)
                {
                  /* Double rotation: right around x, then left around y. */
                  PNODE w;

                  assert (x->balance == -1);
                  w = x->link[0];
                  x->link[0] = w->link[1];
                  w->link[1] = x;
                  y->link[1] = w->link[0];
                  w->link[0] = y;
                  if (w->balance == +1)
                    {
                      x->balance = 0;
                      y->balance = -1;
                    }
                  else if (w->balance == 0)
                    {
                      x->balance = y->balance = 0;
                    }
                  else /* |w->pavl_balance == -1| */
                    {
                      x->balance = +1;
                      y->balance = 0;
                    }
                  w->balance = 0;
                  w->parent = y->parent;
                  x->parent = y->parent = w;
                  if (x->link[0] != NULL)
                    {
                      x->link[0]->parent = x;
                    }
                  if (y->link[1] != NULL)
                    {
                      y->link[1]->parent = y;
                    }
                  q->link[dir] = w;
                }
              else
                {
                  /* Single left rotation around y. */
                  y->link[1] = x->link[0];
                  x->link[0] = y;
                  x->parent = y->parent;
                  y->parent = x;
                  if (y->link[1] != NULL)
                    {
                      y->link[1]->parent = y;
                    }
                  q->link[dir] = x;
                  if (x->balance == 0)
                    {
                      /* Height unchanged: rebalancing is complete. */
                      x->balance = -1;
                      y->balance = +1;
                      break;
                    }
                  else
                    {
                      x->balance = y->balance = 0;
                      y = x;
                    }
                }
            }
        }
      else
        {
          /* Deleted from y's right subtree: mirror image of the branch above. */
          dir = q->link[0] != y;
          y->balance--;
          if (y->balance == -1)
            {
              break;
            }
          else if (y->balance == -2)
            {
              PNODE x = y->link[0];
              if (x->balance == +1)
                {
                  /* Double rotation: left around x, then right around y. */
                  PNODE w;
                  assert (x->balance == +1);
                  w = x->link[1];
                  x->link[1] = w->link[0];
                  w->link[0] = x;
                  y->link[0] = w->link[1];
                  w->link[1] = y;
                  if (w->balance == -1)
                    {
                      x->balance = 0;
                      y->balance = +1;
                    }
                  else if (w->balance == 0)
                    {
                      x->balance = y->balance = 0;
                    }
                  else /* |w->pavl_balance == +1| */
                    {
                      x->balance = -1;
                      y->balance = 0;
                    }
                  w->balance = 0;
                  w->parent = y->parent;
                  x->parent = y->parent = w;
                  if (x->link[1] != NULL)
                    {
                      x->link[1]->parent = x;
                    }
                  if (y->link[0] != NULL)
                    {
                      y->link[0]->parent = y;
                    }
                  q->link[dir] = w;
                }
              else
                {
                  /* Single right rotation around y. */
                  y->link[0] = x->link[1];
                  x->link[1] = y;
                  x->parent = y->parent;
                  y->parent = x;
                  if (y->link[0] != NULL)
                    {
                      y->link[0]->parent = y;
                    }
                  q->link[dir] = x;
                  if (x->balance == 0)
                    {
                      /* Height unchanged: rebalancing is complete. */
                      x->balance = +1;
                      y->balance = -1;
                      break;
                    }
                  else
                    {
                      x->balance = y->balance = 0;
                      y = x;
                    }
                }
            }
        }
    }

}
638
639 PNODE avl_get_next(PNODE root, PNODE p)
640 {
641 PNODE q;
642 if (p->link[1])
643 {
644 p = p->link[1];
645 while(p->link[0])
646 {
647 p = p->link[0];
648 }
649 return p;
650 }
651 else
652 {
653 q = p->parent;
654 while (q && q->link[1] == p)
655 {
656 p = q;
657 q = q->parent;
658 }
659 if (q == NULL)
660 {
661 return NULL;
662 }
663 else
664 {
665 return q;
666 }
667 }
668 }
669
670 PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
671 {
672 PNODE p;
673 PNODE prev = NULL;
674 int cmp;
675
676 for (p = root; p != NULL;)
677 {
678 cmp = compare((PVOID)&size, p);
679 if (cmp < 0)
680 {
681 prev = p;
682 p = p->link[0];
683 }
684 else if (cmp > 0)
685 {
686 p = p->link[1];
687 }
688 else
689 {
690 while (p->link[0])
691 {
692 cmp = compare((PVOID)&size, p->link[0]);
693 if (cmp != 0)
694 {
695 break;
696 }
697 p = p->link[0];
698 }
699 return p;
700 }
701 }
702 return prev;
703 }
704
705 /* non paged pool functions ************************************************/
706
707 #ifdef TAG_STATISTICS_TRACKING
708 VOID
709 MiRemoveFromTagHashTable(BLOCK_HDR* block)
710 /*
711 * Remove a block from the tag hash table
712 */
713 {
714 if (block->Tag == 0)
715 {
716 return;
717 }
718
719 RemoveEntryList(&block->TagListEntry);
720 }
721
722 VOID
723 MiAddToTagHashTable(BLOCK_HDR* block)
724 /*
725 * Add a block to the tag hash table
726 */
727 {
728 ULONG hash;
729
730 if (block->Tag == 0)
731 {
732 return;
733 }
734
735 hash = block->Tag % TAG_HASH_TABLE_SIZE;
736
737 InsertHeadList(&tag_hash_table[hash], &block->TagListEntry);
738 }
739 #endif /* TAG_STATISTICS_TRACKING */
740
741 VOID
742 MiInitializeNonPagedPool(VOID)
743 {
744 #ifdef TAG_STATISTICS_TRACKING
745 ULONG i;
746 for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
747 {
748 InitializeListHead(&tag_hash_table[i]);
749 }
750 #endif
751 MiCurrentNonPagedPoolLength = 0;
752 KeInitializeSpinLock(&MmNpoolLock);
753 InitializeListHead(&UsedBlockListHead);
754 InitializeListHead(&AddressListHead);
755 FreeBlockListRoot = NULL;
756 }
757
758 #ifdef TAG_STATISTICS_TRACKING
759 VOID STATIC
760 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
761 {
762 CHAR c1, c2, c3, c4;
763
764 c1 = (CurrentTag >> 24) & 0xFF;
765 c2 = (CurrentTag >> 16) & 0xFF;
766 c3 = (CurrentTag >> 8) & 0xFF;
767 c4 = CurrentTag & 0xFF;
768
769 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
770 {
771 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
772 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
773 CurrentSize, CurrentSize / CurrentNrBlocks);
774 }
775 else
776 {
777 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
778 CurrentTag, CurrentNrBlocks, CurrentSize,
779 CurrentSize / CurrentNrBlocks);
780 }
781 }
782 #endif /* TAG_STATISTICS_TRACKING */
783
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
/*
 * Print per-tag allocation statistics for the whole pool, grouped by tag.
 * If NewOnly is TRUE, only blocks not yet reported (Dumped == FALSE) are
 * counted; each counted block is then marked Dumped.
 *
 * Algorithm: for each hash bucket, repeatedly sweep the bucket, pulling
 * every block with the first tag seen onto a temporary list and printing
 * that tag's totals; when the bucket is empty, splice the temporary list
 * back so the table is left intact. NOTE: this runs without MmNpoolLock,
 * so it is only safe when the pool is otherwise quiescent (debug use).
 */
{
#ifdef TAG_STATISTICS_TRACKING
  ULONG i;
  BLOCK_HDR* current;
  ULONG CurrentTag;        /* tag currently being accumulated */
  ULONG CurrentNrBlocks;   /* block count for CurrentTag */
  ULONG CurrentSize;       /* byte total for CurrentTag */
  ULONG TotalBlocks;
  ULONG TotalSize;
  LIST_ENTRY tmpListHead;  /* holds already-counted blocks of this bucket */
  PLIST_ENTRY current_entry;

  DbgPrint("******* Dumping non paging pool stats ******\n");
  TotalBlocks = 0;
  TotalSize = 0;
  for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
    {
      InitializeListHead(&tmpListHead);

      while (!IsListEmpty(&tag_hash_table[i]))
        {
          CurrentTag = 0;

          /* One pass: adopt the first tag found, move all blocks with
             that tag to tmpListHead and accumulate their sizes. */
          current_entry = tag_hash_table[i].Flink;
          while (current_entry != &tag_hash_table[i])
            {
              current = CONTAINING_RECORD(current_entry, BLOCK_HDR, TagListEntry);
              current_entry = current_entry->Flink;  /* advance before unlinking */
              if (CurrentTag == 0)
                {
                  CurrentTag = current->Tag;
                  CurrentNrBlocks = 0;
                  CurrentSize = 0;
                }
              if (current->Tag == CurrentTag)
                {
                  RemoveEntryList(&current->TagListEntry);
                  InsertHeadList(&tmpListHead, &current->TagListEntry);
                  if (!NewOnly || !current->Dumped)
                    {
                      CurrentNrBlocks++;
                      TotalBlocks++;
                      CurrentSize += current->Size;
                      TotalSize += current->Size;
                      current->Dumped = TRUE;
                    }
                }
            }
          if (CurrentTag != 0 && CurrentNrBlocks != 0)
            {
              MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
            }
        }
      /* Splice the counted blocks back into the (now empty) bucket. */
      if (!IsListEmpty(&tmpListHead))
        {
          tag_hash_table[i].Flink = tmpListHead.Flink;
          tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
          tag_hash_table[i].Blink = tmpListHead.Blink;
          tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
        }
    }
  if (TotalBlocks != 0)
    {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
    }
  else
    {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
    }
  /* NOTE(review): EiNrFreeBlocks is declared under #ifndef
     WHOLE_PAGE_ALLOCATIONS but used here under TAG_STATISTICS_TRACKING;
     the combination WHOLE_PAGE_ALLOCATIONS + TAG_STATISTICS_TRACKING
     would not compile -- verify the intended ifdef pairing. */
  DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
           EiNrFreeBlocks, EiFreeNonPagedPool, EiNrFreeBlocks ? EiFreeNonPagedPool / EiNrFreeBlocks : 0);
  DbgPrint("***************** Dump Complete ***************\n");
#endif /* TAG_STATISTICS_TRACKING */
}
862
863 VOID
864 MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
865 {
866 BLOCK_HDR* current;
867 PLIST_ENTRY current_entry;
868 KIRQL oldIrql;
869
870 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
871
872 DbgPrint("******* Dumping non paging pool contents ******\n");
873 current_entry = UsedBlockListHead.Flink;
874 while (current_entry != &UsedBlockListHead)
875 {
876 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
877 if (!NewOnly || !current->Dumped)
878 {
879 CHAR c1, c2, c3, c4;
880
881 c1 = (current->Tag >> 24) & 0xFF;
882 c2 = (current->Tag >> 16) & 0xFF;
883 c3 = (current->Tag >> 8) & 0xFF;
884 c4 = current->Tag & 0xFF;
885
886 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
887 {
888 DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
889 current->Size, current->Tag, c4, c3, c2, c1,
890 current->Caller);
891 }
892 else
893 {
894 DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
895 current->Size, current->Tag, current->Caller);
896 }
897 current->Dumped = TRUE;
898 }
899 current_entry = current_entry->Flink;
900 }
901 DbgPrint("***************** Dump Complete ***************\n");
902 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
903 }
904
905 #ifndef WHOLE_PAGE_ALLOCATIONS
906
907 #ifdef ENABLE_VALIDATE_POOL
908 static void validate_free_list(void)
909 /*
910 * FUNCTION: Validate the integrity of the list of free blocks
911 */
912 {
913 BLOCK_HDR* current;
914 PLIST_ENTRY current_entry;
915 unsigned int blocks_seen=0;
916
917 current_entry = MiFreeBlockListHead.Flink;
918 while (current_entry != &MiFreeBlockListHead)
919 {
920 PVOID base_addr;
921
922 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
923 base_addr = (PVOID)current;
924
925 if (current->Magic != BLOCK_HDR_FREE_MAGIC)
926 {
927 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
928 current);
929 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
930 }
931
932 if (base_addr < MiNonPagedPoolStart ||
933 MiNonPagedPoolStart + current->Size > MiNonPagedPoolStart + MiCurrentNonPagedPoolLength)
934 {
935 DbgPrint("Block %x found outside pool area\n",current);
936 DbgPrint("Size %d\n",current->Size);
937 DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
938 MiNonPagedPoolStart+MiCurrentNonPagedPoolLength);
939 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
940 }
941 blocks_seen++;
942 if (blocks_seen > MiNrFreeBlocks)
943 {
944 DbgPrint("Too many blocks on free list\n");
945 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
946 }
947 if (current->ListEntry.Flink != &MiFreeBlockListHead &&
948 current->ListEntry.Flink->Blink != &current->ListEntry)
949 {
950 DbgPrint("%s:%d:Break in list (current %x next %x "
951 "current->next->previous %x)\n",
952 __FILE__,__LINE__,current, current->ListEntry.Flink,
953 current->ListEntry.Flink->Blink);
954 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
955 }
956
957 current_entry = current_entry->Flink;
958 }
959 }
960
static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 * Checks each used block's magic, pool bounds, total count, and the
 * Flink/Blink pairing; hangs (for(;;)) or bugchecks on corruption.
 * Only compiled under ENABLE_VALIDATE_POOL.
 */
{
  BLOCK_HDR* current;
  PLIST_ENTRY current_entry;
  unsigned int blocks_seen=0;

  current_entry = UsedBlockListHead.Flink;
  while (current_entry != &UsedBlockListHead)
    {
      PVOID base_addr;

      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
      base_addr = (PVOID)current;

      if (current->Magic != BLOCK_HDR_USED_MAGIC)
        {
          DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                   current);
          KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
        }
      /* NOTE(review): arithmetic on PVOID below is a GCC extension
         (treated as char*); standard C requires a char* cast. */
      if (base_addr < MiNonPagedPoolStart ||
          (base_addr+current->Size) >
          MiNonPagedPoolStart+MiCurrentNonPagedPoolLength)
        {
          DbgPrint("Block %x found outside pool area\n",current);
          for(;;);
        }
      blocks_seen++;
      if (blocks_seen > EiNrUsedBlocks)
        {
          DbgPrint("Too many blocks on used list\n");
          for(;;);
        }
      if (current->ListEntry.Flink != &UsedBlockListHead &&
          current->ListEntry.Flink->Blink != &current->ListEntry)
        {
          DbgPrint("Break in list (current %x next %x)\n",
                   current, current->ListEntry.Flink);
          for(;;);
        }

      current_entry = current_entry->Flink;
    }
}
1008
1009 static void check_duplicates(BLOCK_HDR* blk)
1010 /*
1011 * FUNCTION: Check a block has no duplicates
1012 * ARGUMENTS:
1013 * blk = block to check
1014 * NOTE: Bug checks if duplicates are found
1015 */
1016 {
1017 char* base = (char*)blk;
1018 char* last = ((char*)blk) + +sizeof(BLOCK_HDR) + blk->Size;
1019 BLOCK_HDR* current;
1020 PLIST_ENTRY current_entry;
1021
1022 current_entry = MiFreeBlockListHead.Flink;
1023 while (current_entry != &MiFreeBlockListHead)
1024 {
1025 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
1026
1027 if (current->Magic != BLOCK_HDR_FREE_MAGIC)
1028 {
1029 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
1030 current);
1031 KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1032 }
1033
1034 if ( (char*)current > base && (char*)current < last )
1035 {
1036 DbgPrint("intersecting blocks on list\n");
1037 for(;;);
1038 }
1039 if ( (char*)current < base &&
1040 ((char*)current + current->Size + sizeof(BLOCK_HDR))
1041 > base )
1042 {
1043 DbgPrint("intersecting blocks on list\n");
1044 for(;;);
1045 }
1046
1047 current_entry = current_entry->Flink;
1048 }
1049
1050 current_entry = UsedBlockListHead.Flink;
1051 while (current_entry != &UsedBlockListHead)
1052 {
1053 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
1054
1055 if ( (char*)current > base && (char*)current < last )
1056 {
1057 DbgPrint("intersecting blocks on list\n");
1058 for(;;);
1059 }
1060 if ( (char*)current < base &&
1061 ((char*)current + current->Size + sizeof(BLOCK_HDR))
1062 > base )
1063 {
1064 DbgPrint("intersecting blocks on list\n");
1065 for(;;);
1066 }
1067
1068 current_entry = current_entry->Flink;
1069 }
1070
1071 }
1072
1073 static void validate_kernel_pool(void)
1074 /*
1075 * FUNCTION: Checks the integrity of the kernel memory heap
1076 */
1077 {
1078 BLOCK_HDR* current;
1079 PLIST_ENTRY current_entry;
1080
1081 validate_free_list();
1082 validate_used_list();
1083
1084 current_entry = MiFreeBlockListHead.Flink;
1085 while (current_entry != &MiFreeBlockListHead)
1086 {
1087 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
1088 check_duplicates(current);
1089 current_entry = current_entry->Flink;
1090 }
1091 current_entry = UsedBlockListHead.Flink;
1092 while (current_entry != &UsedBlockListHead)
1093 {
1094 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
1095 check_duplicates(current);
1096 current_entry = current_entry->Flink;
1097 }
1098 }
1099 #endif
1100
#if 0
STATIC VOID
free_pages(BLOCK_HDR* blk)
/*
 * Disabled (never compiled): stub that was intended to release whole
 * pages spanned by a freed block back to the system. It currently only
 * computes the block's extent and detects whether a complete page lies
 * inside it; the actual page-freeing loop was never written ('i' is
 * unused).
 */
{
  ULONG start;
  ULONG end;
  ULONG i;

  /* Byte extent of the block including its header. */
  start = (ULONG)blk;
  end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

  /*
   * If the block doesn't contain a whole page then there is nothing to do
   */
  if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
    {
      return;
    }
}
#endif
1121
1122 static void remove_from_used_list(BLOCK_HDR* current)
1123 {
1124 RemoveEntryList(&current->ListEntry);
1125 EiUsedNonPagedPool -= current->Size;
1126 EiNrUsedBlocks--;
1127 }
1128
1129 static void remove_from_free_list(BLOCK_HDR* current)
1130 {
1131 DPRINT("remove_from_free_list %d\n", current->Size);
1132
1133 avl_remove(&FreeBlockListRoot, &current->Node, compare_node);
1134
1135 EiFreeNonPagedPool -= current->Size;
1136 EiNrFreeBlocks--;
1137 DPRINT("remove_from_free_list done\n");
1138 #ifdef DUMP_AVL
1139 DumpFreeBlockTree();
1140 #endif
1141 }
1142
1143 static void
1144 add_to_free_list(BLOCK_HDR* blk)
1145 /*
1146 * FUNCTION: add the block to the free list (internal)
1147 */
1148 {
1149 BLOCK_HDR* current;
1150
1151 DPRINT("add_to_free_list (%d)\n", blk->Size);
1152
1153 EiNrFreeBlocks++;
1154
1155 if (blk->AddressList.Blink != &AddressListHead)
1156 {
1157 current = CONTAINING_RECORD(blk->AddressList.Blink, BLOCK_HDR, AddressList);
1158 if (current->Magic == BLOCK_HDR_FREE_MAGIC &&
1159 (PVOID)current + current->Size + sizeof(BLOCK_HDR) == (PVOID)blk)
1160 {
1161 CHECKPOINT;
1162 remove_from_free_list(current);
1163 RemoveEntryList(&blk->AddressList);
1164 current->Size = current->Size + sizeof(BLOCK_HDR) + blk->Size;
1165 current->Magic = BLOCK_HDR_USED_MAGIC;
1166 memset(blk, 0xcc, sizeof(BLOCK_HDR));
1167 blk = current;
1168 }
1169 }
1170
1171 if (blk->AddressList.Flink != &AddressListHead)
1172 {
1173 current = CONTAINING_RECORD(blk->AddressList.Flink, BLOCK_HDR, AddressList);
1174 if (current->Magic == BLOCK_HDR_FREE_MAGIC &&
1175 (PVOID)blk + blk->Size + sizeof(BLOCK_HDR) == (PVOID)current)
1176 {
1177 CHECKPOINT;
1178 remove_from_free_list(current);
1179 RemoveEntryList(&current->AddressList);
1180 blk->Size = blk->Size + sizeof(BLOCK_HDR) + current->Size;
1181 memset(current, 0xcc, sizeof(BLOCK_HDR));
1182 }
1183 }
1184
1185 DPRINT("%d\n", blk->Size);
1186 blk->Magic = BLOCK_HDR_FREE_MAGIC;
1187 EiFreeNonPagedPool += blk->Size;
1188 avl_insert(&FreeBlockListRoot, &blk->Node, compare_node);
1189
1190 DPRINT("add_to_free_list done\n");
1191 #ifdef DUMP_AVL
1192 DumpFreeBlockTree();
1193 #endif
1194 }
1195
1196 static void add_to_used_list(BLOCK_HDR* blk)
1197 /*
1198 * FUNCTION: add the block to the used list (internal)
1199 */
1200 {
1201 InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
1202 EiUsedNonPagedPool += blk->Size;
1203 EiNrUsedBlocks++;
1204 }
1205
1206 inline static void* block_to_address(BLOCK_HDR* blk)
1207 /*
1208 * FUNCTION: Translate a block header address to the corresponding block
1209 * address (internal)
1210 */
1211 {
1212 return ( (void *) ((char*)blk + sizeof(BLOCK_HDR)) );
1213 }
1214
1215 inline static BLOCK_HDR* address_to_block(void* addr)
1216 {
1217 return (BLOCK_HDR *)
1218 ( ((char*)addr) - sizeof(BLOCK_HDR) );
1219 }
1220
1221 static BLOCK_HDR* lookup_block(unsigned int size)
1222 {
1223 BLOCK_HDR* current;
1224 BLOCK_HDR* best = NULL;
1225 ULONG new_size;
1226 PVOID block, block_boundary;
1227 PNODE p;
1228
1229 DPRINT("lookup_block %d\n", size);
1230
1231 if (size < PAGE_SIZE)
1232 {
1233 p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
1234 if (p)
1235 {
1236 best = CONTAINING_RECORD(p, BLOCK_HDR, Node);
1237 }
1238 }
1239 else
1240 {
1241 p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
1242
1243 while(p)
1244 {
1245 current = CONTAINING_RECORD(p, BLOCK_HDR, Node);
1246 block = block_to_address(current);
1247 block_boundary = (PVOID)PAGE_ROUND_UP((ULONG)block);
1248 new_size = (ULONG)block_boundary - (ULONG)block + size;
1249 if (new_size != size && (ULONG)block_boundary - (ULONG)block < sizeof(BLOCK_HDR))
1250 {
1251 new_size += PAGE_SIZE;
1252 }
1253 if (current->Size >= new_size &&
1254 (best == NULL || current->Size < best->Size))
1255 {
1256 best = current;
1257 }
1258 if (best && current->Size >= size + PAGE_SIZE + 2 * sizeof(BLOCK_HDR))
1259 {
1260 break;
1261 }
1262 p = avl_get_next(FreeBlockListRoot, p);
1263
1264 }
1265 }
1266 DPRINT("lookup_block done %d\n", best ? best->Size : 0);
1267 return best;
1268 }
1269
static void* take_block(BLOCK_HDR* current, unsigned int size,
                        ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocate a used block of least 'size' from the specified
 * free block
 * RETURNS: The address of the created memory block
 *
 * For page-or-larger requests the payload is first page-aligned by
 * carving a leading free fragment off 'current'. Then, if the block is
 * still much bigger than needed, a trailing free fragment is split off.
 * Caller holds MmNpoolLock.
 */
{
  BLOCK_HDR* blk;
  BOOL Removed = FALSE;

  DPRINT("take_block\n");

  if (size >= PAGE_SIZE)
    {
      /* Header position that makes the payload page-aligned. */
      blk = address_to_block((PVOID)PAGE_ROUND_UP(block_to_address (current)));
      if (blk != current)
        {
          /* NOTE(review): ULONG pointer casts and the cast-as-lvalue
             "(ULONG)blk += PAGE_SIZE" are GCC extensions and truncate
             on 64-bit targets -- should use ULONG_PTR (cf. the AMD64
             pointer-math fix in the log). */
          if ((ULONG)blk - (ULONG)current < sizeof(BLOCK_HDR))
            {
              /* Gap too small for the leading fragment's header: move
                 the aligned payload one page further. */
              (ULONG)blk += PAGE_SIZE;
            }
          assert((ULONG)blk - (ULONG)current + size <= current->Size && (ULONG)blk - (ULONG)current >= sizeof(BLOCK_HDR));

          /* Split: 'current' keeps the leading bytes as a free block,
             'blk' becomes the (used) aligned block. */
          memset(blk, 0, sizeof(BLOCK_HDR));
          blk->Magic = BLOCK_HDR_USED_MAGIC;
          blk->Size = current->Size - ((ULONG)blk - (ULONG)current);
          remove_from_free_list(current);
          current->Size -= (blk->Size + sizeof(BLOCK_HDR));
          /* Insert blk after current on the address-ordered list. */
          blk->AddressList.Flink = current->AddressList.Flink;
          blk->AddressList.Flink->Blink = &blk->AddressList;
          blk->AddressList.Blink = &current->AddressList;
          current->AddressList.Flink = &blk->AddressList;
          add_to_free_list(current);
          Removed = TRUE;
          current = blk;
        }
    }
  if (Removed == FALSE)
    {
      remove_from_free_list(current);
    }

  /*
   * If the block is much bigger than required then split it and
   * return a pointer to the allocated section. If the difference
   * between the sizes is marginal it makes no sense to have the
   * extra overhead
   */
  if (current->Size > size + sizeof(BLOCK_HDR))
    {
      BLOCK_HDR* free_blk;

      /*
       * Replace the bigger block with a smaller block in the
       * same position in the list
       */
      free_blk = (BLOCK_HDR *)(((char*)current)
                               + sizeof(BLOCK_HDR) + size);

      free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);
      current->Size=size;
      /* Insert free_blk after current on the address-ordered list. */
      free_blk->AddressList.Flink = current->AddressList.Flink;
      free_blk->AddressList.Flink->Blink = &free_blk->AddressList;
      free_blk->AddressList.Blink = &current->AddressList;
      current->AddressList.Flink = &free_blk->AddressList;
      current->Magic = BLOCK_HDR_USED_MAGIC;
      free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
      add_to_free_list(free_blk);
      add_to_used_list(current);
      current->Tag = Tag;
      current->Caller = Caller;
      current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
      MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */
      VALIDATE_POOL;
      return(block_to_address(current));
    }

  /*
   * Otherwise allocate the whole block
   */

  current->Magic = BLOCK_HDR_USED_MAGIC;
  current->Tag = Tag;
  current->Caller = Caller;
  current->Dumped = FALSE;
  add_to_used_list(current);
#ifdef TAG_STATISTICS_TRACKING
  MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

  VALIDATE_POOL;
  return(block_to_address(current));
}
1366
1367 static void* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
1368 /*
1369 * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
1370 * bytes
1371 */
1372 {
1373 ULONG nr_pages = PAGE_ROUND_UP(size + sizeof(BLOCK_HDR)) / PAGE_SIZE;
1374 ULONG start;
1375 BLOCK_HDR* blk=NULL;
1376 BLOCK_HDR* current;
1377 ULONG i;
1378 KIRQL oldIrql;
1379 NTSTATUS Status;
1380 PVOID block = NULL;
1381 PLIST_ENTRY current_entry;
1382
1383 if (size >= PAGE_SIZE)
1384 {
1385 nr_pages ++;
1386 }
1387
1388 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1389 start = (ULONG)MiNonPagedPoolStart + MiCurrentNonPagedPoolLength;
1390 if (MiCurrentNonPagedPoolLength + nr_pages * PAGE_SIZE > MiNonPagedPoolLength)
1391 {
1392 DbgPrint("CRITICAL: Out of non-paged pool space\n");
1393 KEBUGCHECK(0);
1394 }
1395 MiCurrentNonPagedPoolLength += nr_pages * PAGE_SIZE;
1396 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1397
1398 DPRINT("growing heap for block size %d, ",size);
1399 DPRINT("start %x\n",start);
1400
1401 for (i=0;i<nr_pages;i++)
1402 {
1403 PHYSICAL_ADDRESS Page;
1404 /* FIXME: Check whether we can really wait here. */
1405 Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
1406 if (!NT_SUCCESS(Status))
1407 {
1408 KEBUGCHECK(0);
1409 return(NULL);
1410 }
1411 Status = MmCreateVirtualMapping(NULL,
1412 (PVOID)(start + (i*PAGE_SIZE)),
1413 PAGE_READWRITE|PAGE_SYSTEM,
1414 Page,
1415 TRUE);
1416 if (!NT_SUCCESS(Status))
1417 {
1418 DbgPrint("Unable to create virtual mapping\n");
1419 KEBUGCHECK(0);
1420 }
1421 }
1422
1423 blk = (struct _BLOCK_HDR *)start;
1424 memset(blk, 0, sizeof(BLOCK_HDR));
1425 blk->Size = (nr_pages * PAGE_SIZE) - sizeof(BLOCK_HDR);
1426 memset(block_to_address(blk), 0xcc, blk->Size);
1427
1428 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1429 current_entry = AddressListHead.Blink;
1430 while (current_entry != &AddressListHead)
1431 {
1432 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, AddressList);
1433 if ((PVOID)current + current->Size < (PVOID)blk)
1434 {
1435 InsertHeadList(current_entry, &blk->AddressList);
1436 break;
1437 }
1438 current_entry = current_entry->Blink;
1439 }
1440 if (current_entry == &AddressListHead)
1441 {
1442 InsertHeadList(&AddressListHead, &blk->AddressList);
1443 }
1444 blk->Magic = BLOCK_HDR_FREE_MAGIC;
1445 add_to_free_list(blk);
1446 blk = lookup_block(size);
1447 if (blk)
1448 {
1449 block = take_block(blk, size, Tag, Caller);
1450 VALIDATE_POOL;
1451 }
1452 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1453 return block;
1454 }
1455
1456 #endif /* not WHOLE_PAGE_ALLOCATIONS */
1457
1458 VOID STDCALL ExFreeNonPagedPool (PVOID block)
1459 /*
1460 * FUNCTION: Releases previously allocated memory
1461 * ARGUMENTS:
1462 * block = block to free
1463 */
1464 {
1465 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
1466 KIRQL oldIrql;
1467
1468 if (block == NULL)
1469 {
1470 return;
1471 }
1472
1473 DPRINT("freeing block %x\n",blk);
1474
1475 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
1476 ((PULONG)&block)[-1]);
1477
1478 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1479
1480 ExFreeWholePageBlock(block);
1481 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1482
1483 #else /* not WHOLE_PAGE_ALLOCATIONS */
1484
1485 BLOCK_HDR* blk=address_to_block(block);
1486 KIRQL oldIrql;
1487
1488 if (block == NULL)
1489 {
1490 return;
1491 }
1492
1493 DPRINT("freeing block %x\n",blk);
1494
1495 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
1496 ((PULONG)&block)[-1]);
1497
1498 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1499
1500 VALIDATE_POOL;
1501
1502 if (blk->Magic != BLOCK_HDR_USED_MAGIC)
1503 {
1504 if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
1505 {
1506 DbgPrint("ExFreePool of already freed address %x\n", block);
1507 }
1508 else
1509 {
1510 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
1511 block, blk->Magic);
1512 }
1513 KEBUGCHECK(0);
1514 return;
1515 }
1516
1517 memset(block, 0xcc, blk->Size);
1518
1519 #ifdef TAG_STATISTICS_TRACKING
1520 MiRemoveFromTagHashTable(blk);
1521 #endif /* TAG_STATISTICS_TRACKING */
1522 remove_from_used_list(blk);
1523 blk->Tag = 0;
1524 blk->Caller = NULL;
1525 blk->TagListEntry.Flink = blk->TagListEntry.Blink = NULL;
1526 blk->Magic = BLOCK_HDR_FREE_MAGIC;
1527 add_to_free_list(blk);
1528 VALIDATE_POOL;
1529 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1530
1531 #endif /* WHOLE_PAGE_ALLOCATIONS */
1532 }
1533
1534 PVOID STDCALL
1535 ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
1536 {
1537 #ifdef WHOLE_PAGE_ALLOCATIONS
1538 PVOID block;
1539 KIRQL oldIrql;
1540
1541 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1542 Size,Caller);
1543
1544 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1545
1546 /*
1547 * accomodate this useful idiom
1548 */
1549 if (Size == 0)
1550 {
1551 POOL_TRACE("= NULL\n");
1552 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1553 return(NULL);
1554 }
1555
1556 block = ExAllocateWholePageBlock(Size);
1557 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1558 return(block);
1559
1560 #else /* not WHOLE_PAGE_ALLOCATIONS */
1561 PVOID block;
1562 BLOCK_HDR* best = NULL;
1563 KIRQL oldIrql;
1564
1565 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1566 Size,Caller);
1567
1568 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1569
1570 VALIDATE_POOL;
1571
1572 #if 0
1573 /* after some allocations print the npaged pool stats */
1574 #ifdef TAG_STATISTICS_TRACKING
1575 {
1576 static ULONG counter = 0;
1577 if (counter++ % 100000 == 0)
1578 {
1579 MiDebugDumpNonPagedPoolStats(FALSE);
1580 }
1581 }
1582 #endif
1583 #endif
1584 /*
1585 * accomodate this useful idiom
1586 */
1587 if (Size == 0)
1588 {
1589 POOL_TRACE("= NULL\n");
1590 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1591 return(NULL);
1592 }
1593 /* Make the size dword alligned, this makes the block dword alligned */
1594 Size = ROUND_UP(Size, 4);
1595 /*
1596 * Look for an already created block of sufficent size
1597 */
1598 best = lookup_block(Size);
1599 if (best == NULL)
1600 {
1601 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1602 block = grow_kernel_pool(Size, Tag, Caller);
1603 if (block == NULL)
1604 {
1605 DPRINT1("%d\n", Size);
1606 DumpFreeBlockTree();
1607 }
1608 assert(block != NULL);
1609 memset(block,0,Size);
1610 }
1611 else
1612 {
1613 block=take_block(best, Size, Tag, Caller);
1614 VALIDATE_POOL;
1615 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1616 memset(block,0,Size);
1617 }
1618 return(block);
1619 #endif /* WHOLE_PAGE_ALLOCATIONS */
1620 }
1621
1622 #ifdef WHOLE_PAGE_ALLOCATIONS
1623
1624 PVOID STDCALL
1625 ExAllocateWholePageBlock(ULONG UserSize)
1626 {
1627 PVOID Address;
1628 PHYSICAL_ADDRESS Page;
1629 ULONG i;
1630 ULONG Size;
1631 ULONG NrPages;
1632
1633 Size = sizeof(ULONG) + UserSize;
1634 NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;
1635
1636 Address = MiAllocNonPagedPoolRegion(NrPages + 1);
1637
1638 for (i = 0; i < NrPages; i++)
1639 {
1640 Page = MmAllocPage(MC_NPPOOL, 0);
1641 if (Page.QuadPart == 0LL)
1642 {
1643 KEBUGCHECK(0);
1644 }
1645 MmCreateVirtualMapping(NULL,
1646 Address + (i * PAGE_SIZE),
1647 PAGE_READWRITE | PAGE_SYSTEM,
1648 Page,
1649 TRUE);
1650 }
1651
1652 *((PULONG)((ULONG)Address + (NrPages * PAGE_SIZE) - Size)) = NrPages;
1653 return((PVOID)((ULONG)Address + (NrPages * PAGE_SIZE) - UserSize));
1654 }
1655
1656 VOID STDCALL
1657 ExFreeWholePageBlock(PVOID Addr)
1658 {
1659 ULONG NrPages;
1660
1661 if (Addr < MiNonPagedPoolStart ||
1662 Addr >= (MiNonPagedPoolStart + MiCurrentNonPagedPoolLength))
1663 {
1664 DbgPrint("Block %x found outside pool area\n", Addr);
1665 KEBUGCHECK(0);
1666 }
1667 NrPages = *(PULONG)((ULONG)Addr - sizeof(ULONG));
1668 MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG)Addr), NrPages, TRUE);
1669 }
1670
1671 #endif /* WHOLE_PAGE_ALLOCATIONS */
1672
1673 /* EOF */