79e5ef95dc7a0f215820d614eb18f0f3b43b231e
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / expool.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
#line 15 "ARM³::EXPOOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 #undef ExAllocatePoolWithQuota
20 #undef ExAllocatePoolWithQuotaTag
21
22 /* GLOBALS ********************************************************************/
23
24 ULONG ExpNumberOfPagedPools;
25 POOL_DESCRIPTOR NonPagedPoolDescriptor;
26 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
27 PPOOL_DESCRIPTOR PoolVector[2];
28 PVOID PoolTrackTable;
29 PKGUARDED_MUTEX ExpPagedPoolMutex;
30
31 /* Pool block/header/list access macros */
32 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
33 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
34 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
35 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
36 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
37
38 /*
39 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
40 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
41 * pool code, but only for checked builds.
42 *
43 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
44 * that these checks are done even on retail builds, due to the increasing
45 * number of kernel-mode attacks which depend on dangling list pointers and other
46 * kinds of list-based attacks.
47 *
48 * For now, I will leave these checks on all the time, but later they are likely
49 * to be DBG-only, at least until there are enough kernel-mode security attacks
50 * against ReactOS to warrant the performance hit.
51 *
52 * For now, these are not made inline, so we can get good stack traces.
53 */
54 PLIST_ENTRY
55 NTAPI
56 ExpDecodePoolLink(IN PLIST_ENTRY Link)
57 {
58 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
59 }
60
61 PLIST_ENTRY
62 NTAPI
63 ExpEncodePoolLink(IN PLIST_ENTRY Link)
64 {
65 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
66 }
67
/*
 * Verifies the doubly-linked-list invariant of an encoded pool list head:
 * the Blink of the first entry and the Flink of the last entry must both
 * point back at the head. Any mismatch means a dangling or corrupted link,
 * and the system is brought down with BAD_POOL_HEADER (parameter 1 = 3).
 */
VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    /* Decode twice: once to reach the neighbor, once to decode its back-link */
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        /* Report the head plus both (decoded) offending links for debugging */
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}
82
83 VOID
84 NTAPI
85 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
86 {
87 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
88 }
89
90 BOOLEAN
91 NTAPI
92 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
93 {
94 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
95 }
96
97 VOID
98 NTAPI
99 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
100 {
101 PLIST_ENTRY Blink, Flink;
102 Flink = ExpDecodePoolLink(Entry->Flink);
103 Blink = ExpDecodePoolLink(Entry->Blink);
104 Flink->Blink = ExpEncodePoolLink(Blink);
105 Blink->Flink = ExpEncodePoolLink(Flink);
106 }
107
108 PLIST_ENTRY
109 NTAPI
110 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
111 {
112 PLIST_ENTRY Entry, Flink;
113 Entry = ExpDecodePoolLink(ListHead->Flink);
114 Flink = ExpDecodePoolLink(Entry->Flink);
115 ListHead->Flink = ExpEncodePoolLink(Flink);
116 Flink->Blink = ExpEncodePoolLink(ListHead);
117 return Entry;
118 }
119
120 PLIST_ENTRY
121 NTAPI
122 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
123 {
124 PLIST_ENTRY Entry, Blink;
125 Entry = ExpDecodePoolLink(ListHead->Blink);
126 Blink = ExpDecodePoolLink(Entry->Blink);
127 ListHead->Blink = ExpEncodePoolLink(Blink);
128 Blink->Flink = ExpEncodePoolLink(ListHead);
129 return Entry;
130 }
131
132 VOID
133 NTAPI
134 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
135 IN PLIST_ENTRY Entry)
136 {
137 PLIST_ENTRY Blink;
138 ExpCheckPoolLinks(ListHead);
139 Blink = ExpDecodePoolLink(ListHead->Blink);
140 Entry->Flink = ExpEncodePoolLink(ListHead);
141 Entry->Blink = ExpEncodePoolLink(Blink);
142 Blink->Flink = ExpEncodePoolLink(Entry);
143 ListHead->Blink = ExpEncodePoolLink(Entry);
144 ExpCheckPoolLinks(ListHead);
145 }
146
147 VOID
148 NTAPI
149 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
150 IN PLIST_ENTRY Entry)
151 {
152 PLIST_ENTRY Flink;
153 ExpCheckPoolLinks(ListHead);
154 Flink = ExpDecodePoolLink(ListHead->Flink);
155 Entry->Flink = ExpEncodePoolLink(Flink);
156 Entry->Blink = ExpEncodePoolLink(ListHead);
157 Flink->Blink = ExpEncodePoolLink(Entry);
158 ListHead->Flink = ExpEncodePoolLink(Entry);
159 ExpCheckPoolLinks(ListHead);
160 }
161
/*
 * Validates the header invariants of a single pool block against its
 * immediate neighbors: the previous block (if any) must be on the same
 * page and agree on size; a block with no predecessor must start on a
 * page boundary; the block must have a nonzero size; and the next block
 * (unless the page ends here) must be on the same page and record this
 * block's size as its PreviousSize. Any violation bugchecks with
 * BAD_POOL_HEADER (parameter 1 = 5..9 identifies which invariant broke;
 * parameter 3 is the source line for easy identification).
 */
VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}
246
247 VOID
248 NTAPI
249 ExpCheckPoolBlocks(IN PVOID Block)
250 {
251 BOOLEAN FoundBlock;
252 SIZE_T Size = 0;
253 PPOOL_HEADER Entry;
254
255 /* Get the first entry for this page, make sure it really is the first */
256 Entry = PAGE_ALIGN(Block);
257 ASSERT(Entry->PreviousSize == 0);
258
259 /* Now scan each entry */
260 while (TRUE)
261 {
262 /* When we actually found our block, remember this */
263 if (Entry == Block) FoundBlock = TRUE;
264
265 /* Now validate this block header */
266 ExpCheckPoolHeader(Entry);
267
268 /* And go to the next one, keeping track of our size */
269 Size += Entry->BlockSize;
270 Entry = POOL_NEXT_BLOCK(Entry);
271
272 /* If we hit the last block, stop */
273 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
274
275 /* If we hit the end of the page, stop */
276 if (PAGE_ALIGN(Entry) == Entry) break;
277 }
278
279 /* We must've found our block, and we must have hit the end of the page */
280 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
281 {
282 /* Otherwise, the blocks are messed up */
283 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
284 }
285 }
286
287 /* PRIVATE FUNCTIONS **********************************************************/
288
289 VOID
290 NTAPI
291 INIT_FUNCTION
292 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
293 IN POOL_TYPE PoolType,
294 IN ULONG PoolIndex,
295 IN ULONG Threshold,
296 IN PVOID PoolLock)
297 {
298 PLIST_ENTRY NextEntry, LastEntry;
299
300 //
301 // Setup the descriptor based on the caller's request
302 //
303 PoolDescriptor->PoolType = PoolType;
304 PoolDescriptor->PoolIndex = PoolIndex;
305 PoolDescriptor->Threshold = Threshold;
306 PoolDescriptor->LockAddress = PoolLock;
307
308 //
309 // Initialize accounting data
310 //
311 PoolDescriptor->RunningAllocs = 0;
312 PoolDescriptor->RunningDeAllocs = 0;
313 PoolDescriptor->TotalPages = 0;
314 PoolDescriptor->TotalBytes = 0;
315 PoolDescriptor->TotalBigPages = 0;
316
317 //
318 // Nothing pending for now
319 //
320 PoolDescriptor->PendingFrees = NULL;
321 PoolDescriptor->PendingFreeDepth = 0;
322
323 //
324 // Loop all the descriptor's allocation lists and initialize them
325 //
326 NextEntry = PoolDescriptor->ListHeads;
327 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
328 while (NextEntry < LastEntry)
329 {
330 ExpInitializePoolListHead(NextEntry);
331 NextEntry++;
332 }
333 }
334
335 VOID
336 NTAPI
337 INIT_FUNCTION
338 InitializePool(IN POOL_TYPE PoolType,
339 IN ULONG Threshold)
340 {
341 PPOOL_DESCRIPTOR Descriptor;
342
343 //
344 // Check what kind of pool this is
345 //
346 if (PoolType == NonPagedPool)
347 {
348 //
349 // Initialize the nonpaged pool descriptor
350 //
351 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
352 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
353 NonPagedPool,
354 0,
355 Threshold,
356 NULL);
357 }
358 else
359 {
360 //
361 // Allocate the pool descriptor
362 //
363 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
364 sizeof(KGUARDED_MUTEX) +
365 sizeof(POOL_DESCRIPTOR),
366 'looP');
367 if (!Descriptor)
368 {
369 //
370 // This is really bad...
371 //
372 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
373 0,
374 -1,
375 -1,
376 -1);
377 }
378
379 //
380 // Setup the vector and guarded mutex for paged pool
381 //
382 PoolVector[PagedPool] = Descriptor;
383 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
384 KeInitializeGuardedMutex(ExpPagedPoolMutex);
385 ExInitializePoolDescriptor(Descriptor,
386 PagedPool,
387 0,
388 Threshold,
389 ExpPagedPoolMutex);
390 }
391 }
392
393 FORCEINLINE
394 KIRQL
395 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
396 {
397 //
398 // Check if this is nonpaged pool
399 //
400 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
401 {
402 //
403 // Use the queued spin lock
404 //
405 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
406 }
407 else
408 {
409 //
410 // Use the guarded mutex
411 //
412 KeAcquireGuardedMutex(Descriptor->LockAddress);
413 return APC_LEVEL;
414 }
415 }
416
417 FORCEINLINE
418 VOID
419 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
420 IN KIRQL OldIrql)
421 {
422 //
423 // Check if this is nonpaged pool
424 //
425 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
426 {
427 //
428 // Use the queued spin lock
429 //
430 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
431 }
432 else
433 {
434 //
435 // Use the guarded mutex
436 //
437 KeReleaseGuardedMutex(Descriptor->LockAddress);
438 }
439 }
440
441 /* PUBLIC FUNCTIONS ***********************************************************/
442
443 /*
444 * @implemented
445 */
446 PVOID
447 NTAPI
448 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
449 IN SIZE_T NumberOfBytes,
450 IN ULONG Tag)
451 {
452 PPOOL_DESCRIPTOR PoolDesc;
453 PLIST_ENTRY ListHead;
454 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
455 KIRQL OldIrql;
456 ULONG BlockSize, i;
457
458 //
459 // Some sanity checks
460 //
461 ASSERT(Tag != 0);
462 ASSERT(Tag != ' GIB');
463 ASSERT(NumberOfBytes != 0);
464
465 //
466 // Get the pool type and its corresponding vector for this request
467 //
468 PoolType = PoolType & BASE_POOL_TYPE_MASK;
469 PoolDesc = PoolVector[PoolType];
470 ASSERT(PoolDesc != NULL);
471
472 //
473 // Check if this is a big page allocation
474 //
475 if (NumberOfBytes > POOL_MAX_ALLOC)
476 {
477 //
478 // Then just return the number of pages requested
479 //
480 return MiAllocatePoolPages(PoolType, NumberOfBytes);
481 }
482
483 //
484 // Should never request 0 bytes from the pool, but since so many drivers do
485 // it, we'll just assume they want 1 byte, based on NT's similar behavior
486 //
487 if (!NumberOfBytes) NumberOfBytes = 1;
488
489 //
490 // A pool allocation is defined by its data, a linked list to connect it to
491 // the free list (if necessary), and a pool header to store accounting info.
492 // Calculate this size, then convert it into a block size (units of pool
493 // headers)
494 //
495 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
496 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
497 // the direct allocation of pages.
498 //
499 i = (NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1)) / POOL_BLOCK_SIZE;
500
501 //
502 // Loop in the free lists looking for a block if this size. Start with the
503 // list optimized for this kind of size lookup
504 //
505 ListHead = &PoolDesc->ListHeads[i];
506 do
507 {
508 //
509 // Are there any free entries available on this list?
510 //
511 if (!ExpIsPoolListEmpty(ListHead))
512 {
513 //
514 // Acquire the pool lock now
515 //
516 OldIrql = ExLockPool(PoolDesc);
517
518 //
519 // And make sure the list still has entries
520 //
521 if (ExpIsPoolListEmpty(ListHead))
522 {
523 //
524 // Someone raced us (and won) before we had a chance to acquire
525 // the lock.
526 //
527 // Try again!
528 //
529 ExUnlockPool(PoolDesc, OldIrql);
530 ListHead++;
531 continue;
532 }
533
534 //
535 // Remove a free entry from the list
536 // Note that due to the way we insert free blocks into multiple lists
537 // there is a guarantee that any block on this list will either be
538 // of the correct size, or perhaps larger.
539 //
540 ExpCheckPoolLinks(ListHead);
541 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
542 ExpCheckPoolLinks(ListHead);
543 ExpCheckPoolBlocks(Entry);
544 ASSERT(Entry->BlockSize >= i);
545 ASSERT(Entry->PoolType == 0);
546
547 //
548 // Check if this block is larger that what we need. The block could
549 // not possibly be smaller, due to the reason explained above (and
550 // we would've asserted on a checked build if this was the case).
551 //
552 if (Entry->BlockSize != i)
553 {
554 //
555 // Is there an entry before this one?
556 //
557 if (Entry->PreviousSize == 0)
558 {
559 //
560 // There isn't anyone before us, so take the next block and
561 // turn it into a fragment that contains the leftover data
562 // that we don't need to satisfy the caller's request
563 //
564 FragmentEntry = POOL_BLOCK(Entry, i);
565 FragmentEntry->BlockSize = Entry->BlockSize - i;
566
567 //
568 // And make it point back to us
569 //
570 FragmentEntry->PreviousSize = i;
571
572 //
573 // Now get the block that follows the new fragment and check
574 // if it's still on the same page as us (and not at the end)
575 //
576 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
577 if (PAGE_ALIGN(NextEntry) != NextEntry)
578 {
579 //
580 // Adjust this next block to point to our newly created
581 // fragment block
582 //
583 NextEntry->PreviousSize = FragmentEntry->BlockSize;
584 }
585 }
586 else
587 {
588 //
589 // There is a free entry before us, which we know is smaller
590 // so we'll make this entry the fragment instead
591 //
592 FragmentEntry = Entry;
593
594 //
595 // And then we'll remove from it the actual size required.
596 // Now the entry is a leftover free fragment
597 //
598 Entry->BlockSize -= i;
599
600 //
601 // Now let's go to the next entry after the fragment (which
602 // used to point to our original free entry) and make it
603 // reference the new fragment entry instead.
604 //
605 // This is the entry that will actually end up holding the
606 // allocation!
607 //
608 Entry = POOL_NEXT_BLOCK(Entry);
609 Entry->PreviousSize = FragmentEntry->BlockSize;
610
611 //
612 // And now let's go to the entry after that one and check if
613 // it's still on the same page, and not at the end
614 //
615 NextEntry = POOL_BLOCK(Entry, i);
616 if (PAGE_ALIGN(NextEntry) != NextEntry)
617 {
618 //
619 // Make it reference the allocation entry
620 //
621 NextEntry->PreviousSize = i;
622 }
623 }
624
625 //
626 // Now our (allocation) entry is the right size
627 //
628 Entry->BlockSize = i;
629
630 //
631 // And the next entry is now the free fragment which contains
632 // the remaining difference between how big the original entry
633 // was, and the actual size the caller needs/requested.
634 //
635 FragmentEntry->PoolType = 0;
636 BlockSize = FragmentEntry->BlockSize;
637
638 //
639 // Now check if enough free bytes remained for us to have a
640 // "full" entry, which contains enough bytes for a linked list
641 // and thus can be used for allocations (up to 8 bytes...)
642 //
643 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
644 if (BlockSize != 1)
645 {
646 //
647 // Insert the free entry into the free list for this size
648 //
649 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
650 POOL_FREE_BLOCK(FragmentEntry));
651 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
652 }
653 }
654
655 //
656 // We have found an entry for this allocation, so set the pool type
657 // and release the lock since we're done
658 //
659 Entry->PoolType = PoolType + 1;
660 ExpCheckPoolBlocks(Entry);
661 ExUnlockPool(PoolDesc, OldIrql);
662
663 //
664 // Return the pool allocation
665 //
666 Entry->PoolTag = Tag;
667 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
668 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
669 return POOL_FREE_BLOCK(Entry);
670 }
671 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
672
673 //
674 // There were no free entries left, so we have to allocate a new fresh page
675 //
676 Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
677 ASSERT(Entry != NULL);
678 Entry->Ulong1 = 0;
679 Entry->BlockSize = i;
680 Entry->PoolType = PoolType + 1;
681
682 //
683 // This page will have two entries -- one for the allocation (which we just
684 // created above), and one for the remaining free bytes, which we're about
685 // to create now. The free bytes are the whole page minus what was allocated
686 // and then converted into units of block headers.
687 //
688 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
689 FragmentEntry = POOL_BLOCK(Entry, i);
690 FragmentEntry->Ulong1 = 0;
691 FragmentEntry->BlockSize = BlockSize;
692 FragmentEntry->PreviousSize = i;
693
694 //
695 // Now check if enough free bytes remained for us to have a "full" entry,
696 // which contains enough bytes for a linked list and thus can be used for
697 // allocations (up to 8 bytes...)
698 //
699 if (FragmentEntry->BlockSize != 1)
700 {
701 //
702 // Excellent -- acquire the pool lock
703 //
704 OldIrql = ExLockPool(PoolDesc);
705
706 //
707 // And insert the free entry into the free list for this block size
708 //
709 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
710 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
711 POOL_FREE_BLOCK(FragmentEntry));
712 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
713
714 //
715 // Release the pool lock
716 //
717 ExpCheckPoolBlocks(Entry);
718 ExUnlockPool(PoolDesc, OldIrql);
719 }
720
721 //
722 // And return the pool allocation
723 //
724 ExpCheckPoolBlocks(Entry);
725 Entry->PoolTag = Tag;
726 return POOL_FREE_BLOCK(Entry);
727 }
728
729 /*
730 * @implemented
731 */
732 PVOID
733 NTAPI
734 ExAllocatePool(POOL_TYPE PoolType,
735 SIZE_T NumberOfBytes)
736 {
737 //
738 // Use a default tag of "None"
739 //
740 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, 'enoN');
741 }
742
743 /*
744 * @implemented
745 */
/*
 * Frees a pool allocation previously returned by ExAllocatePoolWithTag.
 * Page-aligned pointers are whole-page (big) allocations and go straight
 * back to the page allocator. Otherwise the block's header (immediately
 * preceding P) is located, the block is coalesced with a free successor
 * and/or predecessor on the same page, and either the whole page is
 * released (if the merge covers it) or the merged block is pushed onto
 * the free list for its size.
 *
 * NOTE: TagToFree is accepted but never checked against the block's
 * stored tag in this implementation.
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    ULONG BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    BOOLEAN Combined = FALSE;

    //
    // Quickly deal with big page allocations
    //
    if (PAGE_ALIGN(P) == P)
    {
        MiFreePoolPages(P);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry, and it's pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        // (PoolType == 0 marks a free block)
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        // (NextEntry is reused here to hold the PREVIOUS block)
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}
926
927 /*
928 * @implemented
929 */
930 VOID
931 NTAPI
932 ExFreePool(PVOID P)
933 {
934 //
935 // Just free without checking for the tag
936 //
937 ExFreePoolWithTag(P, 0);
938 }
939
940 /*
941 * @unimplemented
942 */
943 SIZE_T
944 NTAPI
945 ExQueryPoolBlockSize(IN PVOID PoolBlock,
946 OUT PBOOLEAN QuotaCharged)
947 {
948 //
949 // Not implemented
950 //
951 UNIMPLEMENTED;
952 return FALSE;
953 }
954
955 /*
956 * @implemented
957 */
958
959 PVOID
960 NTAPI
961 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
962 IN SIZE_T NumberOfBytes)
963 {
964 //
965 // Allocate the pool
966 //
967 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
968 }
969
970 /*
971 * @implemented
972 */
973 PVOID
974 NTAPI
975 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
976 IN SIZE_T NumberOfBytes,
977 IN ULONG Tag,
978 IN EX_POOL_PRIORITY Priority)
979 {
980 //
981 // Allocate the pool
982 //
983 UNIMPLEMENTED;
984 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
985 }
986
987 /*
988 * @implemented
989 */
990 PVOID
991 NTAPI
992 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
993 IN SIZE_T NumberOfBytes,
994 IN ULONG Tag)
995 {
996 //
997 // Allocate the pool
998 //
999 UNIMPLEMENTED;
1000 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
1001 }
1002
1003 /* EOF */