[NTOS]: Use logical math operations on the various block<->entry<->free_list_head...
/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::EXPOOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag
/* GLOBALS ********************************************************************/

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PVOID PoolTrackTable;
PKGUARDED_MUTEX ExpPagedPoolMutex;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))
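
//
// Worked example of the units involved (assuming a 32-bit build, where
// sizeof(POOL_HEADER) and POOL_BLOCK_SIZE are both 8 bytes): BlockSize and
// PreviousSize are counted in POOL_BLOCK_SIZE units and include the header,
// so a block with BlockSize == 4 spans 32 bytes -- an 8-byte header followed
// by 24 bytes of data. POOL_NEXT_BLOCK(x) therefore lands 32 bytes past x,
// while POOL_ENTRY and POOL_FREE_BLOCK convert between a header and its data
// area by stepping over sizeof(POOL_HEADER).
//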

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        InitializeListHead(NextEntry);
        NextEntry++;
    }
}

VOID
NTAPI
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);
    }
}
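
//
// Note: InitializePool(NonPagedPool, ...) must run before
// InitializePool(PagedPool, ...), because the paged pool descriptor and its
// guarded mutex are carved out of a single nonpaged pool allocation above.
//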

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
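
//
// The pair above brackets all free-list manipulation in this file; a minimal
// usage sketch of the pattern:
//
//     KIRQL OldIrql = ExLockPool(PoolDesc);
//     /* ... examine or modify PoolDesc->ListHeads ... */
//     ExUnlockPool(PoolDesc, OldIrql);
//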

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    ULONG BlockSize, i;

    //
    // Check for paged pool
    //
    if (PoolType == PagedPool) return ExAllocatePagedPoolWithTag(PagedPool, NumberOfBytes, Tag);

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);

    //
    // Get the pool type and its corresponding vector for this request
    //
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Hand the request straight to the memory manager, which satisfies
        // it with whole pool pages
        //
        return MiAllocatePoolPages(PoolType, NumberOfBytes);
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1)) / POOL_BLOCK_SIZE;
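
    //
    // Worked example (assuming a 32-bit build where sizeof(POOL_HEADER) and
    // POOL_BLOCK_SIZE are both 8 bytes): a 24-byte request gives
    // i = (24 + 8 + 7) / 8 = 4, i.e. four 8-byte blocks (32 bytes) including
    // the header.
    //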

    //
    // Loop the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!IsListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (IsListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                ListHead++;
                continue;
            }

            //
            // Remove a free entry from the list
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger
            //
            Entry = POOL_ENTRY(RemoveHeadList(ListHead));
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                   POOL_FREE_BLOCK(FragmentEntry));
                }
            }
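
            //
            // Illustrative example of the split above (sizes in POOL_BLOCK_SIZE
            // units): satisfying a 4-block request from a 10-block free entry
            // leaves a 6-block fragment. With no block before us, the layout
            // goes from [free 10] to [alloc 4][free 6]; when a block precedes
            // us, the fragment is kept in front instead: [free 6][alloc 4].
            //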

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = PoolType + 1;
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
    ASSERT(Entry != NULL);
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = PoolType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;
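
    //
    // Example of the resulting layout (assuming 4KB pages with an 8-byte
    // POOL_BLOCK_SIZE, i.e. 512 blocks per page): a 4-block allocation leaves
    // a 508-block fragment, so the page reads [alloc 4][free 508].
    //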

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
                       POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExUnlockPool(PoolDesc, OldIrql);
    }

    //
    // And return the pool allocation
    //
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}
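
//
// Caller-side sketch (illustrative only; 'tseT' is an arbitrary example tag):
//
//     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 64, 'tseT');
//     if (Buffer)
//     {
//         /* ... use the buffer ... */
//         ExFreePoolWithTag(Buffer, 'tseT');
//     }
//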

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    //
    // Use a default tag of "None"
    //
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, 'enoN');
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    ULONG BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    BOOLEAN Combined = FALSE;
#if 1
    //
    // Check for paged pool
    //
    if ((P >= MmPagedPoolBase) &&
        (P <= (PVOID)((ULONG_PTR)MmPagedPoolBase + MmPagedPoolSize)))
    {
        //
        // Use old allocator
        //
        ExFreePagedPool(P);
        return;
    }
#endif

    //
    // Quickly deal with big page allocations
    //
    if (PAGE_ALIGN(P) == P)
    {
        MiFreePoolPages(P);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry, and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if there is a next block on this same page (i.e., we are not the
    // last block before the page boundary)
    //
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                RemoveEntryList(POOL_FREE_BLOCK(NextEntry));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                RemoveEntryList(POOL_FREE_BLOCK(NextEntry));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }
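
    //
    // Example of the merging above: for adjacent blocks [free A][freed B][free C],
    // B first absorbs C, and then A absorbs the combined B+C run; A becomes
    // the single free block that gets reinserted below.
    //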

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the block that now immediately follows our (possibly combined)
        // block, so that its back-link can be updated
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}

/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return 0;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // Priority handling is not implemented yet; fall back to a plain
    // tagged allocation
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    //
    // Quota charging is not implemented yet; fall back to a plain
    // tagged allocation
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/* EOF */