/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::EXPOOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PVOID PoolTrackTable;
PKGUARDED_MUTEX ExpPagedPoolMutex;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))

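/*
 * A minimal sketch of how these macros relate a caller's pointer to its pool
 * header and to the neighboring blocks on the same page. The snippet is
 * illustrative only (kept out of the build with #if 0), and the concrete
 * byte values assume the x86 layout where sizeof(POOL_HEADER) and
 * POOL_BLOCK_SIZE are both 8; the tag is hypothetical.
 */
#if 0
    PVOID P = ExAllocatePoolWithTag(NonPagedPool, 24, 'tseT');
    PPOOL_HEADER Header = POOL_ENTRY(P);         /* P - 8 bytes: back up over the header */
    PPOOL_HEADER Next = POOL_NEXT_BLOCK(Header); /* forward Header->BlockSize * 8 bytes */
    PPOOL_HEADER Prev = POOL_PREV_BLOCK(Next);   /* Next->PreviousSize walks back to Header */
#endif
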
/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
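
/*
 * Worked example of the encoding (the address values are illustrative): a
 * free-list link whose true address is 0x81234560 is stored as 0x81234561,
 * with the low bit set as a sentinel. A real LIST_ENTRY is always pointer
 * aligned, so a set low bit can never be part of a valid address; this lets
 * the checks below tell a properly encoded link from a stray write, and any
 * code that forgets to decode reads from an off-by-one address that those
 * checks will flag.
 */
#if 0
    PLIST_ENTRY Encoded = ExpEncodePoolLink(Link); /* 0x81234560 -> 0x81234561 */
    ASSERT(ExpDecodePoolLink(Encoded) == Link);    /* masking the bit restores it */
#endif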

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}

VOID
NTAPI
ExpCheckPoolBlocks(IN PVOID Block)
{
    BOOLEAN FoundBlock = FALSE;
    SIZE_T Size = 0;
    PPOOL_HEADER Entry;

    /* Get the first entry for this page, make sure it really is the first */
    Entry = PAGE_ALIGN(Block);
    ASSERT(Entry->PreviousSize == 0);

    /* Now scan each entry */
    while (TRUE)
    {
        /* When we actually found our block, remember this */
        if (Entry == Block) FoundBlock = TRUE;

        /* Now validate this block header */
        ExpCheckPoolHeader(Entry);

        /* And go to the next one, keeping track of our size */
        Size += Entry->BlockSize;
        Entry = POOL_NEXT_BLOCK(Entry);

        /* If we hit the last block, stop */
        if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;

        /* If we hit the end of the page, stop */
        if (PAGE_ALIGN(Entry) == Entry) break;
    }

    /* We must've found our block, and we must have hit the end of the page */
    if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
    {
        /* Otherwise, the blocks are messed up */
        KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
    }
}
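
/*
 * For reference, the page layout that ExpCheckPoolBlocks walks looks roughly
 * like this (sizes in POOL_BLOCK_SIZE units; the values are illustrative
 * only). Every block begins with its POOL_HEADER, and BlockSize counts the
 * whole block, header included:
 *
 *   page start                                                    page end
 *   | block A (6 units)  | block B (20 units) | block C (...)           |
 *     PreviousSize == 0    PreviousSize == 6    PreviousSize == 20
 *
 * Each block's BlockSize must land exactly on the next header (or on the
 * page boundary), and each PreviousSize must mirror its neighbor's
 * BlockSize, which is exactly what ExpCheckPoolHeader verifies block by
 * block.
 */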

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Set up the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }
}

VOID
NTAPI
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Set up the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);
    }
}
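
/*
 * A minimal sketch of the expected call sequence during Mm initialization
 * (illustrative only; the actual call sites and threshold values live in the
 * Mm startup code, not here). Note the ordering constraint: the nonpaged
 * descriptor must exist first, because the paged-pool path above allocates
 * its own descriptor from nonpaged pool.
 */
#if 0
    InitializePool(NonPagedPool, 0); /* hypothetical threshold value */
    InitializePool(PagedPool, 0);    /* must come second, see above */
#endif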

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
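
/*
 * Usage pattern for the pair above (illustrative only): every free-list
 * manipulation in this file is bracketed by these two calls, and the KIRQL
 * returned by ExLockPool must be handed back to ExUnlockPool unchanged. For
 * nonpaged pool that KIRQL is the pre-acquire IRQL of the queued spin lock;
 * for paged pool it is simply APC_LEVEL, since the guarded mutex tracks its
 * own state.
 */
#if 0
    KIRQL OldIrql = ExLockPool(PoolDesc);
    /* ... consistently mutate PoolDesc->ListHeads here ... */
    ExUnlockPool(PoolDesc, OldIrql);
#endif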

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    ULONG BlockSize, i;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);

    //
    // Get the pool type and its corresponding vector for this request
    //
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Then just return the number of pages requested
        //
        return MiAllocatePoolPages(PoolType, NumberOfBytes);
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1)) / POOL_BLOCK_SIZE;
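    //
    // Worked example (assuming the 8-byte x86 values for sizeof(POOL_HEADER)
    // and POOL_BLOCK_SIZE): a 100-byte request becomes
    // i = (100 + 8 + 7) / 8 = 14 blocks, i.e. 112 bytes including the header.
    //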

    //
    // Loop in the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                ListHead++;
                continue;
            }

            //
            // Remove a free entry from the list
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = PoolType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
    ASSERT(Entry != NULL);
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = PoolType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}
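
/*
 * Caller-side usage sketch (illustrative only; the tag value is
 * hypothetical). By convention a tag is four ASCII characters, written
 * reversed in the literal so that it reads correctly in a little-endian
 * memory dump, and the same tag should be passed when the allocation is
 * freed.
 */
#if 0
    PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 64, 'lpxE');
    if (Buffer)
    {
        /* ... use the 64 bytes ... */
        ExFreePoolWithTag(Buffer, 'lpxE');
    }
#endif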

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    //
    // Use a default tag of "None"
    //
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, 'enoN');
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    ULONG BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    BOOLEAN Combined = FALSE;

    //
    // Quickly deal with big page allocations
    //
    if (PAGE_ALIGN(P) == P)
    {
        MiFreePoolPages(P);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry, and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}

/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return FALSE;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // Priority handling is not yet implemented; fall back to a regular
    // tagged allocation for now
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    //
    // Quota is not yet charged; fall back to a regular tagged allocation
    // for now
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/* EOF */