/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::EXPOOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

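//
// When AllowPagedPool is FALSE, paged pool requests are routed to the legacy
// paged pool allocator instead of being served by this (ARM3) pool code; see
// ExAllocatePoolWithTag and ExFreePoolWithTag below
//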
BOOLEAN AllowPagedPool = TRUE;

/* GLOBALS ********************************************************************/

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PVOID PoolTrackTable;
PKGUARDED_MUTEX ExpPagedPoolMutex;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))
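
/*
 * Illustration of the layout these macros assume (sizes below are example
 * numbers for a 32-bit build, where sizeof(POOL_HEADER) and POOL_BLOCK_SIZE
 * are both 8 bytes):
 *
 *   |<----------- BlockSize * POOL_BLOCK_SIZE bytes ----------->|
 *   +-------------+----------------------------------------------+
 *   | POOL_HEADER | caller data (or a LIST_ENTRY while on a list)|
 *   +-------------+----------------------------------------------+
 *   ^             ^
 *   POOL_ENTRY(p) p == POOL_FREE_BLOCK(header)
 *
 * POOL_NEXT_BLOCK and POOL_PREV_BLOCK walk in units of POOL_BLOCK_SIZE using
 * the sizes stored in each header, so every header on a page is reachable
 * from any other header on that page.
 */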

/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks that depend on dangling list pointers and other
 * kinds of list-based corruption.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * These are deliberately not inlined, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
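
/*
 * Free-list links are stored with bit 0 set -- a bit that is always clear in
 * a correctly aligned LIST_ENTRY pointer. The helpers below strip the bit on
 * every read and set it on every write, so any code that touches these lists
 * through the ordinary list macros (which neither encode nor decode) produces
 * an off-by-one pointer, and the resulting inconsistency is caught by the
 * Flink/Blink round-trip test in ExpCheckPoolLinks. Illustration, using a
 * purely hypothetical address:
 *
 *   real pointer:  0x8213F008
 *   stored link:   0x8213F009   (ExpEncodePoolLink)
 *   read back as:  0x8213F008   (ExpDecodePoolLink)
 */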

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}
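
/*
 * In short, ExpCheckPoolHeader enforces three invariants for every block:
 *   1. PreviousSize is zero exactly when the block starts a page, and
 *      otherwise matches the BlockSize recorded in the previous header;
 *   2. BlockSize is nonzero;
 *   3. the next block on the page (if any) records our BlockSize as its
 *      PreviousSize.
 * Together these make the size chain on each page self-checking in both
 * directions.
 */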

VOID
NTAPI
ExpCheckPoolBlocks(IN PVOID Block)
{
    BOOLEAN FoundBlock = FALSE;
    SIZE_T Size = 0;
    PPOOL_HEADER Entry;

    /* Get the first entry for this page, make sure it really is the first */
    Entry = PAGE_ALIGN(Block);
    ASSERT(Entry->PreviousSize == 0);

    /* Now scan each entry */
    while (TRUE)
    {
        /* When we actually found our block, remember this */
        if (Entry == Block) FoundBlock = TRUE;

        /* Now validate this block header */
        ExpCheckPoolHeader(Entry);

        /* And go to the next one, keeping track of our size */
        Size += Entry->BlockSize;
        Entry = POOL_NEXT_BLOCK(Entry);

        /* If we hit the last block, stop */
        if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;

        /* If we hit the end of the page, stop */
        if (PAGE_ALIGN(Entry) == Entry) break;
    }

    /* We must've found our block, and we must have hit the end of the page */
    if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
    {
        /* Otherwise, the blocks are messed up */
        KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
    }
}

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }
}

VOID
NTAPI
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
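
        //
        // Note: the guarded mutex lives in the same nonpaged allocation as the
        // descriptor -- (Descriptor + 1) is PPOOL_DESCRIPTOR arithmetic, so it
        // points just past the descriptor, at the sizeof(KGUARDED_MUTEX) bytes
        // that were reserved by the allocation above
        //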
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);
    }
}

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
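
//
// Note on the two lock types: nonpaged pool can be used at up to
// DISPATCH_LEVEL, where a guarded mutex may not be acquired, so it takes the
// queued spinlock. Paged pool callers must be below DISPATCH_LEVEL, so the
// cheaper guarded mutex suffices; it does not actually raise IRQL (it enters
// a guarded region instead), and the APC_LEVEL return value above is only a
// placeholder that keeps ExUnlockPool's interface uniform for both pools
//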

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    ULONG BlockSize, i;

    //
    // Check for paged pool: if this allocator isn't handling paged pool,
    // hand the request over to the legacy paged pool allocator
    //
    if (!(AllowPagedPool) && (PoolType == PagedPool))
    {
        return ExAllocatePagedPoolWithTag(PagedPool, NumberOfBytes, Tag);
    }

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);

    //
    // Get the pool type and its corresponding vector for this request
    //
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Then hand it to the page allocator, which returns whole pages
        //
        return MiAllocatePoolPages(PoolType, NumberOfBytes);
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1)) / POOL_BLOCK_SIZE;
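
    //
    // Worked example (x86 numbers, where sizeof(POOL_HEADER) and
    // POOL_BLOCK_SIZE are both 8): a 20-byte request needs 20 + 8 = 28 bytes,
    // which rounds up to i = (20 + 8 + 7) / 8 = 4 blocks, i.e. 32 bytes
    //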

    //
    // Loop in the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                ListHead++;
                continue;
            }

            //
            // Remove a free entry from the list
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = PoolType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
    ASSERT(Entry != NULL);
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = PoolType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;
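
    //
    // For example, with 4KB pages and 8-byte blocks there are 512 block units
    // per page, so an i = 4 allocation leaves a 508-block free fragment here
    //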

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}
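
//
// Typical driver-side usage (illustrative only; MYDATA and the tag 'tDyM'
// are hypothetical names, and callers should always check for NULL):
//
//     PMYDATA Data = ExAllocatePoolWithTag(NonPagedPool, sizeof(MYDATA), 'tDyM');
//     if (Data == NULL) return STATUS_INSUFFICIENT_RESOURCES;
//     ...
//     ExFreePoolWithTag(Data, 'tDyM');
//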

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    //
    // Use a default tag of "None"
    //
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, 'enoN');
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    ULONG BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    BOOLEAN Combined = FALSE;

    //
    // Check for paged pool
    //
    if ((P >= MmPagedPoolBase) &&
        (P <= (PVOID)((ULONG_PTR)MmPagedPoolBase + MmPagedPoolSize)))
    {
        //
        // Use old allocator
        //
        ExFreePagedPool(P);
        return;
    }

    //
    // Quickly deal with big page allocations
    //
    if (PAGE_ALIGN(P) == P)
    {
        MiFreePoolPages(P);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;
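
    //
    // (Entry is a PPOOL_HEADER, so "Entry--" steps back by exactly
    // sizeof(POOL_HEADER) bytes -- the same thing POOL_ENTRY(P) computes --
    // landing on the header that immediately precedes the caller's data)
    //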

    //
    // Get the size of the entry, and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);
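
    //
    // (BlockSize cannot be 1 here: the smallest allocation -- a header plus at
    // least one byte of data, rounded up -- already occupies two block units,
    // and combining only makes blocks bigger)
    //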

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}

/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return FALSE;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // Allocate the pool, ignoring the priority hint for now
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    //
    // Allocate the pool without charging quota for now
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/* EOF */