Merge from amd64 branch:
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / expool.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
#line 15 "ARM³::EXPOOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 #undef ExAllocatePoolWithQuota
20 #undef ExAllocatePoolWithQuotaTag
21
22 /* GLOBALS ********************************************************************/
23
24 ULONG ExpNumberOfPagedPools;
25 POOL_DESCRIPTOR NonPagedPoolDescriptor;
26 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
27 PPOOL_DESCRIPTOR PoolVector[2];
28 PVOID PoolTrackTable;
29 PKGUARDED_MUTEX ExpPagedPoolMutex;
30
31 /* PRIVATE FUNCTIONS **********************************************************/
32
33 VOID
34 NTAPI
35 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
36 IN POOL_TYPE PoolType,
37 IN ULONG PoolIndex,
38 IN ULONG Threshold,
39 IN PVOID PoolLock)
40 {
41 PLIST_ENTRY NextEntry, LastEntry;
42
43 //
44 // Setup the descriptor based on the caller's request
45 //
46 PoolDescriptor->PoolType = PoolType;
47 PoolDescriptor->PoolIndex = PoolIndex;
48 PoolDescriptor->Threshold = Threshold;
49 PoolDescriptor->LockAddress = PoolLock;
50
51 //
52 // Initialize accounting data
53 //
54 PoolDescriptor->RunningAllocs = 0;
55 PoolDescriptor->RunningDeAllocs = 0;
56 PoolDescriptor->TotalPages = 0;
57 PoolDescriptor->TotalBytes = 0;
58 PoolDescriptor->TotalBigPages = 0;
59
60 //
61 // Nothing pending for now
62 //
63 PoolDescriptor->PendingFrees = NULL;
64 PoolDescriptor->PendingFreeDepth = 0;
65
66 //
67 // Loop all the descriptor's allocation lists and initialize them
68 //
69 NextEntry = PoolDescriptor->ListHeads;
70 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
71 while (NextEntry < LastEntry) InitializeListHead(NextEntry++);
72 }
73
74 VOID
75 NTAPI
76 InitializePool(IN POOL_TYPE PoolType,
77 IN ULONG Threshold)
78 {
79 PPOOL_DESCRIPTOR Descriptor;
80
81 //
82 // Check what kind of pool this is
83 //
84 if (PoolType == NonPagedPool)
85 {
86 //
87 // Initialize the nonpaged pool descriptor
88 //
89 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
90 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
91 NonPagedPool,
92 0,
93 Threshold,
94 NULL);
95 }
96 else
97 {
98 //
99 // Allocate the pool descriptor
100 //
101 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
102 sizeof(KGUARDED_MUTEX) +
103 sizeof(POOL_DESCRIPTOR),
104 'looP');
105 if (!Descriptor)
106 {
107 //
108 // This is really bad...
109 //
110 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
111 0,
112 -1,
113 -1,
114 -1);
115 }
116
117 //
118 // Setup the vector and guarded mutex for paged pool
119 //
120 PoolVector[PagedPool] = Descriptor;
121 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
122 KeInitializeGuardedMutex(ExpPagedPoolMutex);
123 ExInitializePoolDescriptor(Descriptor,
124 PagedPool,
125 0,
126 Threshold,
127 ExpPagedPoolMutex);
128 }
129 }
130
131 FORCEINLINE
132 KIRQL
133 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
134 {
135 //
136 // Check if this is nonpaged pool
137 //
138 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
139 {
140 //
141 // Use the queued spin lock
142 //
143 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
144 }
145 else
146 {
147 //
148 // Use the guarded mutex
149 //
150 KeAcquireGuardedMutex(Descriptor->LockAddress);
151 return APC_LEVEL;
152 }
153 }
154
155 FORCEINLINE
156 VOID
157 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
158 IN KIRQL OldIrql)
159 {
160 //
161 // Check if this is nonpaged pool
162 //
163 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
164 {
165 //
166 // Use the queued spin lock
167 //
168 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
169 }
170 else
171 {
172 //
173 // Use the guarded mutex
174 //
175 KeReleaseGuardedMutex(Descriptor->LockAddress);
176 }
177 }
178
179 /* PUBLIC FUNCTIONS ***********************************************************/
180
181 PVOID
182 NTAPI
183 ExAllocateArmPoolWithTag(IN POOL_TYPE PoolType,
184 IN SIZE_T NumberOfBytes,
185 IN ULONG Tag)
186 {
187 PPOOL_DESCRIPTOR PoolDesc;
188 PLIST_ENTRY ListHead;
189 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
190 KIRQL OldIrql;
191 ULONG BlockSize, i;
192
193 //
194 // Some sanity checks
195 //
196 ASSERT(Tag != 0);
197 ASSERT(Tag != ' GIB');
198 ASSERT(NumberOfBytes != 0);
199
200 //
201 // Get the pool type and its corresponding vector for this request
202 //
203 PoolType = PoolType & BASE_POOL_TYPE_MASK;
204 PoolDesc = PoolVector[PoolType];
205 ASSERT(PoolDesc != NULL);
206
207 //
208 // Check if this is a big page allocation
209 //
210 if (NumberOfBytes > POOL_MAX_ALLOC)
211 {
212 //
213 // Then just return the number of pages requested
214 //
215 return MiAllocatePoolPages(PoolType, NumberOfBytes);
216 }
217
218 //
219 // Should never request 0 bytes from the pool, but since so many drivers do
220 // it, we'll just assume they want 1 byte, based on NT's similar behavior
221 //
222 if (!NumberOfBytes) NumberOfBytes = 1;
223
224 //
225 // A pool allocation is defined by its data, a linked list to connect it to
226 // the free list (if necessary), and a pool header to store accounting info.
227 // Calculate this size, then convert it into a block size (units of pool
228 // headers)
229 //
230 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
231 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
232 // the direct allocation of pages.
233 //
234 i = (NumberOfBytes + sizeof(POOL_HEADER) + sizeof(LIST_ENTRY) - 1) /
235 sizeof(POOL_HEADER);
236
237 //
238 // Loop in the free lists looking for a block if this size. Start with the
239 // list optimized for this kind of size lookup
240 //
241 ListHead = &PoolDesc->ListHeads[i];
242 do
243 {
244 //
245 // Are there any free entries available on this list?
246 //
247 if (!IsListEmpty(ListHead))
248 {
249 //
250 // Acquire the pool lock now
251 //
252 OldIrql = ExLockPool(PoolDesc);
253
254 //
255 // And make sure the list still has entries
256 //
257 if (IsListEmpty(ListHead))
258 {
259 //
260 // Someone raced us (and won) before we had a chance to acquire
261 // the lock.
262 //
263 // Try again!
264 //
265 ExUnlockPool(PoolDesc, OldIrql);
266 ListHead++;
267 continue;
268 }
269
270 //
271 // Remove a free entry from the list
272 // Note that due to the way we insert free blocks into multiple lists
273 // there is a guarantee that any block on this list will either be
274 // of the correct size, or perhaps larger.
275 //
276 Entry = (PPOOL_HEADER)RemoveHeadList(ListHead) - 1;
277 ASSERT(Entry->BlockSize >= i);
278 ASSERT(Entry->PoolType == 0);
279
280 //
281 // Check if this block is larger that what we need. The block could
282 // not possibly be smaller, due to the reason explained above (and
283 // we would've asserted on a checked build if this was the case).
284 //
285 if (Entry->BlockSize != i)
286 {
287 //
288 // Is there an entry before this one?
289 //
290 if (Entry->PreviousSize == 0)
291 {
292 //
293 // There isn't anyone before us, so take the next block and
294 // turn it into a fragment that contains the leftover data
295 // that we don't need to satisfy the caller's request
296 //
297 FragmentEntry = Entry + i;
298 FragmentEntry->BlockSize = Entry->BlockSize - i;
299
300 //
301 // And make it point back to us
302 //
303 FragmentEntry->PreviousSize = i;
304
305 //
306 // Now get the block that follows the new fragment and check
307 // if it's still on the same page as us (and not at the end)
308 //
309 NextEntry = FragmentEntry + FragmentEntry->BlockSize;
310 if (PAGE_ALIGN(NextEntry) != NextEntry)
311 {
312 //
313 // Adjust this next block to point to our newly created
314 // fragment block
315 //
316 NextEntry->PreviousSize = FragmentEntry->BlockSize;
317 }
318 }
319 else
320 {
321 //
322 // There is a free entry before us, which we know is smaller
323 // so we'll make this entry the fragment instead
324 //
325 FragmentEntry = Entry;
326
327 //
328 // And then we'll remove from it the actual size required.
329 // Now the entry is a leftover free fragment
330 //
331 Entry->BlockSize -= i;
332
333 //
334 // Now let's go to the next entry after the fragment (which
335 // used to point to our original free entry) and make it
336 // reference the new fragment entry instead.
337 //
338 // This is the entry that will actually end up holding the
339 // allocation!
340 //
341 Entry += Entry->BlockSize;
342 Entry->PreviousSize = FragmentEntry->BlockSize;
343
344 //
345 // And now let's go to the entry after that one and check if
346 // it's still on the same page, and not at the end
347 //
348 NextEntry = Entry + i;
349 if (PAGE_ALIGN(NextEntry) != NextEntry)
350 {
351 //
352 // Make it reference the allocation entry
353 //
354 NextEntry->PreviousSize = i;
355 }
356 }
357
358 //
359 // Now our (allocation) entry is the right size
360 //
361 Entry->BlockSize = i;
362
363 //
364 // And the next entry is now the free fragment which contains
365 // the remaining difference between how big the original entry
366 // was, and the actual size the caller needs/requested.
367 //
368 FragmentEntry->PoolType = 0;
369 BlockSize = FragmentEntry->BlockSize;
370
371 //
372 // Now check if enough free bytes remained for us to have a
373 // "full" entry, which contains enough bytes for a linked list
374 // and thus can be used for allocations (up to 8 bytes...)
375 //
376 if (BlockSize != 1)
377 {
378 //
379 // Insert the free entry into the free list for this size
380 //
381 InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
382 (PLIST_ENTRY)FragmentEntry + 1);
383 }
384 }
385
386 //
387 // We have found an entry for this allocation, so set the pool type
388 // and release the lock since we're done
389 //
390 Entry->PoolType = PoolType + 1;
391 ExUnlockPool(PoolDesc, OldIrql);
392
393 //
394 // Return the pool allocation
395 //
396 Entry->PoolTag = Tag;
397 return ++Entry;
398 }
399 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
400
401 //
402 // There were no free entries left, so we have to allocate a new fresh page
403 //
404 Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
405 Entry->Ulong1 = 0;
406 Entry->BlockSize = i;
407 Entry->PoolType = PoolType + 1;
408
409 //
410 // This page will have two entries -- one for the allocation (which we just
411 // created above), and one for the remaining free bytes, which we're about
412 // to create now. The free bytes are the whole page minus what was allocated
413 // and then converted into units of block headers.
414 //
415 BlockSize = (PAGE_SIZE / sizeof(POOL_HEADER)) - i;
416 FragmentEntry = Entry + i;
417 FragmentEntry->Ulong1 = 0;
418 FragmentEntry->BlockSize = BlockSize;
419 FragmentEntry->PreviousSize = i;
420
421 //
422 // Now check if enough free bytes remained for us to have a "full" entry,
423 // which contains enough bytes for a linked list and thus can be used for
424 // allocations (up to 8 bytes...)
425 //
426 if (FragmentEntry->BlockSize != 1)
427 {
428 //
429 // Excellent -- acquire the pool lock
430 //
431 OldIrql = ExLockPool(PoolDesc);
432
433 //
434 // And insert the free entry into the free list for this block size
435 //
436 InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
437 (PLIST_ENTRY)FragmentEntry + 1);
438
439 //
440 // Release the pool lock
441 //
442 ExUnlockPool(PoolDesc, OldIrql);
443 }
444
445 //
446 // And return the pool allocation
447 //
448 Entry->PoolTag = Tag;
449 return ++Entry;
450 }
451
/*
 * Allocates NumberOfBytes from the given pool with the default "None" tag.
 */
PVOID
NTAPI
ExAllocateArmPool(POOL_TYPE PoolType,
                  SIZE_T NumberOfBytes)
{
    //
    // Use a default tag of "None"
    //
    // NOTE(review): this forwards to ExAllocatePoolWithTag rather than
    // ExAllocateArmPoolWithTag -- confirm this is the intended routing while
    // the ARM3 pool coexists with the legacy pool implementation
    //
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, 'enoN');
}
462
/*
 * Frees the pool allocation at P. Page-aligned pointers are treated as big
 * page allocations and released directly; otherwise the block is coalesced
 * with free neighbors on the same page, the whole page is released if the
 * block now spans it entirely, and the (possibly combined) free block is
 * put back on the free list for its size.
 *
 * NOTE(review): TagToFree is currently ignored -- no tag verification is
 * performed on free; confirm this is intentional until tag tracking exists.
 */
VOID
NTAPI
ExFreeArmPoolWithTag(IN PVOID P,
                     IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    ULONG BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    BOOLEAN Combined = FALSE;

    //
    // Quickly deal with big page allocations
    //
    if (PAGE_ALIGN(P) == P)
    {
        MiFreePoolPages(P);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    // (the POOL_HEADER immediately precedes the data the caller was given)
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry, and it's pool type, then load the descriptor
    // for this pool type
    //
    // The stored type is the base pool type + 1 (see the allocation path),
    // so masking with 3 and subtracting 1 recovers the PoolVector index
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType & 3) - 1;
    PoolDesc = PoolVector[PoolType];

    //
    // Get the pointer to the next entry
    //
    NextEntry = Entry + BlockSize;

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        // (a zero PoolType marks a free block)
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = Entry - Entry->PreviousSize;
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(Entry + Entry->BlockSize) == Entry + Entry->BlockSize))
    {
        //
        // In this case, release the pool lock, and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = Entry + BlockSize;

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], (PLIST_ENTRY)Entry + 1);
    ExUnlockPool(PoolDesc, OldIrql);
}
635
/*
 * Frees the pool allocation at P without verifying its tag.
 */
VOID
NTAPI
ExFreeArmPool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreeArmPoolWithTag(P, 0);
}
645
646 /*
647 * @unimplemented
648 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented -- always reports a zero size and leaves QuotaCharged
    // untouched
    //
    UNIMPLEMENTED;
    return FALSE;
}
660
661 /*
662 * @implemented
663 */
664
/*
 * Quota-charged allocation with the default "None" tag; simply forwards to
 * ExAllocatePoolWithQuotaTag.
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
}
675
676 /*
677 * @implemented
678 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // Allocate the pool -- the Priority hint is currently ignored and the
    // request is forwarded straight to ExAllocatePoolWithTag
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
692
693 /*
694 * @implemented
695 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    //
    // Allocate the pool -- no quota is actually charged yet; the request is
    // forwarded straight to ExAllocatePoolWithTag
    //
    UNIMPLEMENTED;
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
708
709 /* EOF */