/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::EXPOOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PVOID PoolTrackTable;
PKGUARDED_MUTEX ExpPagedPoolMutex;
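
//
// NOTE: PoolVector[] holds one descriptor per base pool type, indexed by
// NonPagedPool (0) and PagedPool (1). ExpPagedPoolDescriptor[] reserves 16
// slots plus one extra (presumably for the session pool, as on NT), and
// PoolTrackTable is declared for pool-tag tracking but is not yet populated
// anywhere in this file.
//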

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Set up the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop over all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry) InitializeListHead(NextEntry++);
}

VOID
NTAPI
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // Allocate the pool descriptor and its guarded mutex in one block
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Set up the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);
    }
}
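
//
// NOTE: The nonpaged descriptor therefore has to be initialized before the
// paged one -- the paged descriptor and its guarded mutex are themselves
// carved out of nonpaged pool by the ExAllocatePoolWithTag call above.
//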

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
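
//
// NOTE: These helpers form a pair -- whatever ExLockPool returns must be
// passed back to ExUnlockPool. For nonpaged pool that value is the IRQL we
// were at before the queued spinlock raised us to DISPATCH_LEVEL; for paged
// pool the guarded mutex does not raise IRQL at all (it disables APC
// delivery instead), so APC_LEVEL is returned as a placeholder and is
// simply ignored on release.
//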

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    ULONG BlockSize, i;

    //
    // Check for paged pool -- it is still handled by the old allocator
    //
    if (PoolType == PagedPool) return ExAllocatePagedPoolWithTag(PagedPool, NumberOfBytes, Tag);

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);

    //
    // Get the pool type and its corresponding vector for this request
    //
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Then just return the number of pages requested
        //
        return MiAllocatePoolPages(PoolType, NumberOfBytes);
    }
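
    //
    // NOTE: Big allocations are handed out in whole pages and are therefore
    // always page aligned -- ExFreePoolWithTag below relies on exactly this
    // to tell a big allocation apart from a small in-page one.
    //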

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (NumberOfBytes + sizeof(POOL_HEADER) + sizeof(LIST_ENTRY) - 1) /
        sizeof(POOL_HEADER);
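
    //
    // For example (on x86, where POOL_HEADER and LIST_ENTRY are both 8 bytes):
    // a 24-byte request yields i = (24 + 8 + 8 - 1) / 8 = 4 blocks, i.e. one
    // block for the header and three blocks (24 bytes) for the data.
    //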

    //
    // Loop the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!IsListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (IsListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                ListHead++;
                continue;
            }

            //
            // Remove a free entry from the list.
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            Entry = (PPOOL_HEADER)RemoveHeadList(ListHead) - 1;
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = Entry + i;
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = FragmentEntry + FragmentEntry->BlockSize;
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller,
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry += Entry->BlockSize;
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = Entry + i;
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                   (PLIST_ENTRY)FragmentEntry + 1);
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = PoolType + 1;
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            return ++Entry;
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
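
    //
    // If we got here, every list from index i up to POOL_LISTS_PER_PAGE - 1
    // was empty, so no existing free block can satisfy the request.
    //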

    //
    // There were no free entries left, so we have to allocate a new fresh page,
    // failing the request if even that is impossible
    //
    Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
    if (!Entry) return NULL;
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = PoolType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / sizeof(POOL_HEADER)) - i;
    FragmentEntry = Entry + i;
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;
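
    //
    // For example, on x86 (4096-byte pages, 8-byte pool headers) a page holds
    // 4096 / 8 = 512 blocks; with the 4-block allocation from the earlier
    // example, the leftover fragment would span the remaining 508 blocks.
    //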

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
                       (PLIST_ENTRY)FragmentEntry + 1);

        //
        // Release the pool lock
        //
        ExUnlockPool(PoolDesc, OldIrql);
    }

    //
    // And return the pool allocation
    //
    Entry->PoolTag = Tag;
    return ++Entry;
}
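
//
// Illustrative usage (hypothetical driver tag 'xEoR' -- any 4-character tag
// works, conventionally spelled in reverse so it reads correctly in memory
// dumps, like 'enoN' for "None" below):
//
//     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 64, 'xEoR');
//     if (Buffer)
//     {
//         /* ... use the 64 bytes ... */
//         ExFreePoolWithTag(Buffer, 'xEoR');
//     }
//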

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    //
    // Use a default tag of "None"
    //
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, 'enoN');
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    ULONG BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    BOOLEAN Combined = FALSE;

    //
    // Check for paged pool
    //
    if ((P >= MmPagedPoolBase) &&
        (P <= (PVOID)((ULONG_PTR)MmPagedPoolBase + MmPagedPoolSize)))
    {
        //
        // Use the old allocator
        //
        ExFreePagedPool(P);
        return;
    }

    //
    // Quickly deal with big page allocations
    //
    if (PAGE_ALIGN(P) == P)
    {
        MiFreePoolPages(P);
        return;
    }
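
    //
    // NOTE: Small allocations always sit after an in-page POOL_HEADER, so they
    // can never be page aligned -- a page-aligned pointer here must have come
    // from MiAllocatePoolPages, which is why this check is sufficient.
    //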

    //
    // Get the entry for this pool allocation.
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry, and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Get the pointer to the next entry
    //
    NextEntry = Entry + BlockSize;

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = Entry - Entry->PreviousSize;
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(Entry + Entry->BlockSize) == Entry + Entry->BlockSize))
    {
        //
        // In this case, release the pool lock, and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first block past the end of the combined region (the region
        // begins either at our original block, or at the previous block we
        // merged into)
        //
        NextEntry = Entry + BlockSize;

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], (PLIST_ENTRY)Entry + 1);
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking the tag
    //
    ExFreePoolWithTag(P, 0);
}

/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return 0;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool with a default tag of "None"
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // The priority hint is not handled yet -- just allocate the pool
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    //
    // Quota charging is not handled yet -- just allocate the pool
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
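
//
// NOTE: The quota and priority variants above currently forward straight to
// ExAllocatePoolWithTag; the UNIMPLEMENTED markers track the missing
// quota-charging and priority semantics. The #undef directives at the top of
// this file presumably exist so these functions can be defined even where
// the headers map the quota names to macros.
//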

/* EOF */