[NTOSKRNL/FORMATTING]
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / expool.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "../ARM3/miarm.h"
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 ULONG ExpNumberOfPagedPools;
24 POOL_DESCRIPTOR NonPagedPoolDescriptor;
25 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
26 PPOOL_DESCRIPTOR PoolVector[2];
27 PVOID PoolTrackTable;
28 PKGUARDED_MUTEX ExpPagedPoolMutex;
29
30 /* Pool block/header/list access macros */
31 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
32 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
33 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
34 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
35 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
36
37 /*
38 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
39 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
40 * pool code, but only for checked builds.
41 *
42 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
43 * that these checks are done even on retail builds, due to the increasing
44 * number of kernel-mode attacks which depend on dangling list pointers and other
45 * kinds of list-based attacks.
46 *
47 * For now, I will leave these checks on all the time, but later they are likely
48 * to be DBG-only, at least until there are enough kernel-mode security attacks
49 * against ReactOS to warrant the performance hit.
50 *
51 * For now, these are not made inline, so we can get good stack traces.
52 */
53 PLIST_ENTRY
54 NTAPI
55 ExpDecodePoolLink(IN PLIST_ENTRY Link)
56 {
57 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
58 }
59
60 PLIST_ENTRY
61 NTAPI
62 ExpEncodePoolLink(IN PLIST_ENTRY Link)
63 {
64 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
65 }
66
67 VOID
68 NTAPI
69 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
70 {
71 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
72 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
73 {
74 KeBugCheckEx(BAD_POOL_HEADER,
75 3,
76 (ULONG_PTR)ListHead,
77 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
78 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
79 }
80 }
81
82 VOID
83 NTAPI
84 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
85 {
86 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
87 }
88
89 BOOLEAN
90 NTAPI
91 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
92 {
93 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
94 }
95
96 VOID
97 NTAPI
98 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
99 {
100 PLIST_ENTRY Blink, Flink;
101 Flink = ExpDecodePoolLink(Entry->Flink);
102 Blink = ExpDecodePoolLink(Entry->Blink);
103 Flink->Blink = ExpEncodePoolLink(Blink);
104 Blink->Flink = ExpEncodePoolLink(Flink);
105 }
106
107 PLIST_ENTRY
108 NTAPI
109 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
110 {
111 PLIST_ENTRY Entry, Flink;
112 Entry = ExpDecodePoolLink(ListHead->Flink);
113 Flink = ExpDecodePoolLink(Entry->Flink);
114 ListHead->Flink = ExpEncodePoolLink(Flink);
115 Flink->Blink = ExpEncodePoolLink(ListHead);
116 return Entry;
117 }
118
119 PLIST_ENTRY
120 NTAPI
121 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
122 {
123 PLIST_ENTRY Entry, Blink;
124 Entry = ExpDecodePoolLink(ListHead->Blink);
125 Blink = ExpDecodePoolLink(Entry->Blink);
126 ListHead->Blink = ExpEncodePoolLink(Blink);
127 Blink->Flink = ExpEncodePoolLink(ListHead);
128 return Entry;
129 }
130
131 VOID
132 NTAPI
133 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
134 IN PLIST_ENTRY Entry)
135 {
136 PLIST_ENTRY Blink;
137 ExpCheckPoolLinks(ListHead);
138 Blink = ExpDecodePoolLink(ListHead->Blink);
139 Entry->Flink = ExpEncodePoolLink(ListHead);
140 Entry->Blink = ExpEncodePoolLink(Blink);
141 Blink->Flink = ExpEncodePoolLink(Entry);
142 ListHead->Blink = ExpEncodePoolLink(Entry);
143 ExpCheckPoolLinks(ListHead);
144 }
145
146 VOID
147 NTAPI
148 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
149 IN PLIST_ENTRY Entry)
150 {
151 PLIST_ENTRY Flink;
152 ExpCheckPoolLinks(ListHead);
153 Flink = ExpDecodePoolLink(ListHead->Flink);
154 Entry->Flink = ExpEncodePoolLink(Flink);
155 Entry->Blink = ExpEncodePoolLink(ListHead);
156 Flink->Blink = ExpEncodePoolLink(Entry);
157 ListHead->Flink = ExpEncodePoolLink(Entry);
158 ExpCheckPoolLinks(ListHead);
159 }
160
/*
 * Validates a single pool block header against its neighbors on the page.
 * Checks: the previous block (if any) is on the same page and agrees on
 * size; a block with no predecessor starts on a page boundary; the block
 * has a nonzero size; and the following block (if not at page end) is on
 * the same page and records this block's size as its PreviousSize.
 * Any inconsistency bugchecks with BAD_POOL_HEADER; the third argument is
 * __LINE__ so the failing check can be identified from the crash.
 */
VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}
245
/*
 * Walks every pool block on the page containing Block, validating each
 * header with ExpCheckPoolHeader and confirming that (a) Block itself is
 * one of the blocks encountered and (b) the block sizes exactly tile the
 * page. Bugchecks with BAD_POOL_HEADER code 10 otherwise.
 */
VOID
NTAPI
ExpCheckPoolBlocks(IN PVOID Block)
{
    BOOLEAN FoundBlock = FALSE;
    SIZE_T Size = 0;
    PPOOL_HEADER Entry;

    /* Get the first entry for this page, make sure it really is the first */
    Entry = PAGE_ALIGN(Block);
    ASSERT(Entry->PreviousSize == 0);

    /* Now scan each entry */
    while (TRUE)
    {
        /* When we actually found our block, remember this */
        if (Entry == Block) FoundBlock = TRUE;

        /* Now validate this block header */
        ExpCheckPoolHeader(Entry);

        /* And go to the next one, keeping track of our size */
        Size += Entry->BlockSize;
        Entry = POOL_NEXT_BLOCK(Entry);

        /* If we hit the last block, stop */
        /* (Size is in POOL_BLOCK_SIZE units, so a full page's worth ends the walk) */
        if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;

        /* If we hit the end of the page, stop */
        if (PAGE_ALIGN(Entry) == Entry) break;
    }

    /* We must've found our block, and we must have hit the end of the page */
    if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
    {
        /* Otherwise, the blocks are messed up */
        KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
    }
}
285
286 /* PRIVATE FUNCTIONS **********************************************************/
287
288 VOID
289 NTAPI
290 INIT_FUNCTION
291 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
292 IN POOL_TYPE PoolType,
293 IN ULONG PoolIndex,
294 IN ULONG Threshold,
295 IN PVOID PoolLock)
296 {
297 PLIST_ENTRY NextEntry, LastEntry;
298
299 //
300 // Setup the descriptor based on the caller's request
301 //
302 PoolDescriptor->PoolType = PoolType;
303 PoolDescriptor->PoolIndex = PoolIndex;
304 PoolDescriptor->Threshold = Threshold;
305 PoolDescriptor->LockAddress = PoolLock;
306
307 //
308 // Initialize accounting data
309 //
310 PoolDescriptor->RunningAllocs = 0;
311 PoolDescriptor->RunningDeAllocs = 0;
312 PoolDescriptor->TotalPages = 0;
313 PoolDescriptor->TotalBytes = 0;
314 PoolDescriptor->TotalBigPages = 0;
315
316 //
317 // Nothing pending for now
318 //
319 PoolDescriptor->PendingFrees = NULL;
320 PoolDescriptor->PendingFreeDepth = 0;
321
322 //
323 // Loop all the descriptor's allocation lists and initialize them
324 //
325 NextEntry = PoolDescriptor->ListHeads;
326 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
327 while (NextEntry < LastEntry)
328 {
329 ExpInitializePoolListHead(NextEntry);
330 NextEntry++;
331 }
332 }
333
/*
 * One-time initialization for a base pool type.
 *
 * For NonPagedPool, the statically-allocated NonPagedPoolDescriptor is
 * wired into PoolVector and initialized with no lock (the nonpaged pool
 * uses the global queued spinlock instead; see ExLockPool).
 *
 * For PagedPool, a single nonpaged allocation holds both the descriptor
 * and, immediately after it, the guarded mutex that serializes it
 * ((Descriptor + 1) points at the mutex). Failure to allocate this block
 * is fatal: without a paged pool descriptor the system cannot run, so we
 * bugcheck with MUST_SUCCEED_POOL_EMPTY.
 */
VOID
NTAPI
INIT_FUNCTION
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // Allocate the pool descriptor (and the guarded mutex that will
        // follow it in memory) out of nonpaged pool
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);
    }
}
391
392 FORCEINLINE
393 KIRQL
394 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
395 {
396 //
397 // Check if this is nonpaged pool
398 //
399 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
400 {
401 //
402 // Use the queued spin lock
403 //
404 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
405 }
406 else
407 {
408 //
409 // Use the guarded mutex
410 //
411 KeAcquireGuardedMutex(Descriptor->LockAddress);
412 return APC_LEVEL;
413 }
414 }
415
416 FORCEINLINE
417 VOID
418 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
419 IN KIRQL OldIrql)
420 {
421 //
422 // Check if this is nonpaged pool
423 //
424 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
425 {
426 //
427 // Use the queued spin lock
428 //
429 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
430 }
431 else
432 {
433 //
434 // Use the guarded mutex
435 //
436 KeReleaseGuardedMutex(Descriptor->LockAddress);
437 }
438 }
439
440 /* PUBLIC FUNCTIONS ***********************************************************/
441
442 /*
443 * @implemented
444 */
/*
 * Allocates NumberOfBytes bytes of tagged pool of the given type.
 *
 * Fast paths: special pool (when enabled for this size/tag) and "big"
 * allocations (> POOL_MAX_ALLOC), which go straight to whole pool pages
 * via MiAllocatePoolPages. Everything else is carved out of per-page
 * block lists: the request is rounded up to block-size units (including
 * room for the POOL_HEADER), a free block of at least that size is taken
 * from the descriptor's free lists (splitting off any excess as a new
 * free fragment), and if no free block exists a fresh page is allocated
 * and split into the allocation plus one free fragment.
 *
 * Returns a pointer to the usable data area (just past the POOL_HEADER),
 * or NULL if a fresh page could not be obtained.
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);

    //
    // Get the pool type and its corresponding vector for this request
    //
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a special pool allocation
    //
    if (MmUseSpecialPool(NumberOfBytes, Tag))
    {
        //
        // Try to allocate using special pool
        //
        Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
        if (Entry) return Entry;
    }

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Then just return the number of pages requested
        //
        return MiAllocatePoolPages(PoolType, NumberOfBytes);
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    // (the ASSERT above only fires on checked builds)
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);

    //
    // Loop in the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                ListHead++;
                continue;
            }

            //
            // Remove a free entry from the list
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger that what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = PoolType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Return the pool allocation
            // (clear the stale free-list links left in the data area first)
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
    if (Entry == NULL)
        return NULL;

    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = PoolType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}
742
743 /*
744 * @implemented
745 */
746 PVOID
747 NTAPI
748 ExAllocatePool(POOL_TYPE PoolType,
749 SIZE_T NumberOfBytes)
750 {
751 //
752 // Use a default tag of "None"
753 //
754 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, TAG_NONE);
755 }
756
757 /*
758 * @implemented
759 */
/*
 * Frees a pool allocation, optionally verifying its tag.
 *
 * Special-pool and page-aligned ("big page") allocations are handed off
 * to their dedicated free routines. For small-block allocations, the
 * block header immediately precedes P; under the pool lock, the freed
 * block is coalesced with a free successor and/or predecessor on the same
 * page. If coalescing yields the entire page, the page itself is returned
 * to MiFreePoolPages; otherwise the (possibly merged) free block is put
 * back on the free list matching its size.
 *
 * If TagToFree is nonzero and does not match the allocation's tag, the
 * system bugchecks with BAD_POOL_CALLER (0x0A).
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    USHORT BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    BOOLEAN Combined = FALSE;

    //
    // Check if it was allocated from a special pool
    //
    if (MmIsSpecialPoolAddress(P))
    {
        //
        // It is, so handle it via special pool free routine
        //
        MmFreeSpecialPool(P);
        return;
    }

    //
    // Quickly deal with big page allocations
    //
    if (PAGE_ALIGN(P) == P)
    {
        MiFreePoolPages(P);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    // (the POOL_HEADER sits immediately before the data the caller sees)
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry, and it's pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check block tag
    //
    if (TagToFree && TagToFree != Entry->PoolTag)
    {
        DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Entry->PoolTag);
        KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, TagToFree);
    }

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        // (PoolType == 0 marks a free block)
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}
961
962 /*
963 * @implemented
964 */
965 VOID
966 NTAPI
967 ExFreePool(PVOID P)
968 {
969 //
970 // Just free without checking for the tag
971 //
972 ExFreePoolWithTag(P, 0);
973 }
974
975 /*
976 * @unimplemented
977 */
/*
 * Stub: should return the size of the given pool block and report whether
 * quota was charged via QuotaCharged. Currently unimplemented and always
 * returns FALSE (0); QuotaCharged is never written.
 * NOTE(review): callers relying on the returned size or on *QuotaCharged
 * will get meaningless results until this is implemented.
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return FALSE;
}
989
990 /*
991 * @implemented
992 */
993
994 PVOID
995 NTAPI
996 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
997 IN SIZE_T NumberOfBytes)
998 {
999 //
1000 // Allocate the pool
1001 //
1002 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
1003 }
1004
1005 /*
1006 * @implemented
1007 */
/*
 * Priority-aware pool allocation. The priority-based failure behavior is
 * not implemented yet: the Priority argument is ignored and the request
 * is forwarded to the plain tagged allocator.
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // Allocate the pool (Priority is currently ignored)
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
1021
1022 /*
1023 * @implemented
1024 */
/*
 * Quota-charged tagged pool allocation. Quota charging against the
 * calling process is not implemented yet: the request is forwarded to
 * ExAllocatePoolWithTag and no quota is charged.
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    //
    // Allocate the pool (no quota is actually charged yet)
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
1037
1038 /* EOF */