Merge trunk (r43561)
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / expool.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
#line 15 "ARM³::EXPOOL"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 #undef ExAllocatePoolWithQuota
20 #undef ExAllocatePoolWithQuotaTag
21
/* GLOBALS ********************************************************************/

// Statically allocated descriptor for the nonpaged pool; the paged pool's
// descriptor is allocated dynamically in InitializePool
POOL_DESCRIPTOR NonPagedPoolDescriptor;

// Indexed by base pool type (NonPagedPool / PagedPool) to find the active
// descriptor for that pool
PPOOL_DESCRIPTOR PoolVector[2];

// Guarded mutex protecting the paged pool descriptor; storage is carved out
// of the same allocation as the descriptor itself (see InitializePool)
PKGUARDED_MUTEX ExpPagedPoolMutex;
27
28 /* PRIVATE FUNCTIONS **********************************************************/
29
30 VOID
31 NTAPI
32 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
33 IN POOL_TYPE PoolType,
34 IN ULONG PoolIndex,
35 IN ULONG Threshold,
36 IN PVOID PoolLock)
37 {
38 PLIST_ENTRY NextEntry, LastEntry;
39
40 //
41 // Setup the descriptor based on the caller's request
42 //
43 PoolDescriptor->PoolType = PoolType;
44 PoolDescriptor->PoolIndex = PoolIndex;
45 PoolDescriptor->Threshold = Threshold;
46 PoolDescriptor->LockAddress = PoolLock;
47
48 //
49 // Initialize accounting data
50 //
51 PoolDescriptor->RunningAllocs = 0;
52 PoolDescriptor->RunningDeAllocs = 0;
53 PoolDescriptor->TotalPages = 0;
54 PoolDescriptor->TotalBytes = 0;
55 PoolDescriptor->TotalBigPages = 0;
56
57 //
58 // Nothing pending for now
59 //
60 PoolDescriptor->PendingFrees = NULL;
61 PoolDescriptor->PendingFreeDepth = 0;
62
63 //
64 // Loop all the descriptor's allocation lists and initialize them
65 //
66 NextEntry = PoolDescriptor->ListHeads;
67 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
68 while (NextEntry < LastEntry) InitializeListHead(NextEntry++);
69 }
70
71 VOID
72 NTAPI
73 InitializePool(IN POOL_TYPE PoolType,
74 IN ULONG Threshold)
75 {
76 PPOOL_DESCRIPTOR Descriptor;
77
78 //
79 // Check what kind of pool this is
80 //
81 if (PoolType == NonPagedPool)
82 {
83 //
84 // Initialize the nonpaged pool descriptor
85 //
86 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
87 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
88 NonPagedPool,
89 0,
90 Threshold,
91 NULL);
92 }
93 else
94 {
95 //
96 // Allocate the pool descriptor
97 //
98 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
99 sizeof(KGUARDED_MUTEX) +
100 sizeof(POOL_DESCRIPTOR),
101 'looP');
102 if (!Descriptor)
103 {
104 //
105 // This is really bad...
106 //
107 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
108 0,
109 -1,
110 -1,
111 -1);
112 }
113
114 //
115 // Setup the vector and guarded mutex for paged pool
116 //
117 PoolVector[PagedPool] = Descriptor;
118 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
119 KeInitializeGuardedMutex(ExpPagedPoolMutex);
120 ExInitializePoolDescriptor(Descriptor,
121 PagedPool,
122 0,
123 Threshold,
124 ExpPagedPoolMutex);
125 }
126 }
127
128 FORCEINLINE
129 KIRQL
130 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
131 {
132 //
133 // Check if this is nonpaged pool
134 //
135 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
136 {
137 //
138 // Use the queued spin lock
139 //
140 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
141 }
142 else
143 {
144 //
145 // Use the guarded mutex
146 //
147 KeAcquireGuardedMutex(Descriptor->LockAddress);
148 return APC_LEVEL;
149 }
150 }
151
152 FORCEINLINE
153 VOID
154 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
155 IN KIRQL OldIrql)
156 {
157 //
158 // Check if this is nonpaged pool
159 //
160 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
161 {
162 //
163 // Use the queued spin lock
164 //
165 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
166 }
167 else
168 {
169 //
170 // Use the guarded mutex
171 //
172 KeReleaseGuardedMutex(Descriptor->LockAddress);
173 }
174 }
175
176 /* PUBLIC FUNCTIONS ***********************************************************/
177
//
// Allocates NumberOfBytes from the requested pool type and stamps the block
// with Tag. Requests above POOL_MAX_ALLOC are satisfied with whole pages from
// MiAllocatePoolPages; anything smaller is carved out of the descriptor's
// per-size free lists, splitting a larger free block into an allocation plus
// a leftover fragment when no exact-size block exists. Returns a pointer to
// the usable data, which sits one POOL_HEADER past the header itself.
//
PVOID
NTAPI
ExAllocateArmPoolWithTag(IN POOL_TYPE PoolType,
                         IN SIZE_T NumberOfBytes,
                         IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    ULONG BlockSize, i;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);

    //
    // Get the pool type and its corresponding vector for this request
    //
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Then just return the number of pages requested
        //
        // NOTE(review): the MiAllocatePoolPages result is returned without a
        // NULL check -- confirm callers tolerate a NULL return here
        //
        return MiAllocatePoolPages(PoolType, NumberOfBytes);
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (NumberOfBytes + sizeof(POOL_HEADER) + sizeof(LIST_ENTRY) - 1) /
        sizeof(POOL_HEADER);

    //
    // Loop in the free lists looking for a block if this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        // (checked unlocked first; re-checked under the lock below)
        //
        if (!IsListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (IsListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                ListHead++;
                continue;
            }

            //
            // Remove a free entry from the list
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            Entry = (PPOOL_HEADER)RemoveHeadList(ListHead) - 1;
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger that what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = Entry + i;
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = FragmentEntry + FragmentEntry->BlockSize;
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry += Entry->BlockSize;
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = Entry + i;
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                   (PLIST_ENTRY)FragmentEntry + 1);
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = PoolType + 1;
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            return ++Entry;
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    // NOTE(review): no NULL check on this MiAllocatePoolPages result before
    // the header writes below -- confirm a failed page allocation is
    // impossible/acceptable on this path
    //
    Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = PoolType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / sizeof(POOL_HEADER)) - i;
    FragmentEntry = Entry + i;
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
                       (PLIST_ENTRY)FragmentEntry + 1);

        //
        // Release the pool lock
        //
        ExUnlockPool(PoolDesc, OldIrql);
    }

    //
    // And return the pool allocation
    //
    Entry->PoolTag = Tag;
    return ++Entry;
}
448
449 PVOID
450 NTAPI
451 ExAllocateArmPool(POOL_TYPE PoolType,
452 SIZE_T NumberOfBytes)
453 {
454 //
455 // Use a default tag of "None"
456 //
457 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, 'enoN');
458 }
459
//
// Frees a pool allocation previously returned by the allocator. Page-aligned
// pointers are treated as big-page allocations and handed straight back to
// MiFreePoolPages; everything else is returned to the owning descriptor's
// free lists, coalescing with the neighboring block(s) on the same page when
// they are also free, and releasing the whole page if the block grows to
// cover it entirely.
//
// NOTE(review): TagToFree is never examined in this implementation -- no
// tag-mismatch checking is performed
//
VOID
NTAPI
ExFreeArmPoolWithTag(IN PVOID P,
                     IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    ULONG BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    BOOLEAN Combined = FALSE;

    //
    // Quickly deal with big page allocations
    //
    if (PAGE_ALIGN(P) == P)
    {
        MiFreePoolPages(P);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    // (the POOL_HEADER sits immediately before the data the caller was given)
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry, and it's pool type, then load the descriptor
    // for this pool type (the stored type is the base type plus one, hence
    // the "- 1" after masking)
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType & 3) - 1;
    PoolDesc = PoolVector[PoolType];

    //
    // Get the pointer to the next entry
    //
    NextEntry = Entry + BlockSize;

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        // (PoolType == 0 marks a free block)
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = Entry - Entry->PreviousSize;
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(Entry + Entry->BlockSize) == Entry + Entry->BlockSize))
    {
        //
        // In this case, release the pool lock, and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = Entry + BlockSize;

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], (PLIST_ENTRY)Entry + 1);
    ExUnlockPool(PoolDesc, OldIrql);
}
632
633 VOID
634 NTAPI
635 ExFreeArmPool(PVOID P)
636 {
637 //
638 // Just free without checking for the tag
639 //
640 ExFreeArmPoolWithTag(P, 0);
641 }
642
643 /*
644 * @unimplemented
645 */
646 SIZE_T
647 NTAPI
648 ExQueryPoolBlockSize(IN PVOID PoolBlock,
649 OUT PBOOLEAN QuotaCharged)
650 {
651 //
652 // Not implemented
653 //
654 UNIMPLEMENTED;
655 return FALSE;
656 }
657
658 /*
659 * @implemented
660 */
661
662 PVOID
663 NTAPI
664 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
665 IN SIZE_T NumberOfBytes)
666 {
667 //
668 // Allocate the pool
669 //
670 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
671 }
672
/*
 * @unimplemented
 */
676 PVOID
677 NTAPI
678 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
679 IN SIZE_T NumberOfBytes,
680 IN ULONG Tag,
681 IN EX_POOL_PRIORITY Priority)
682 {
683 //
684 // Allocate the pool
685 //
686 UNIMPLEMENTED;
687 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
688 }
689
/*
 * @unimplemented
 */
693 PVOID
694 NTAPI
695 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
696 IN SIZE_T NumberOfBytes,
697 IN ULONG Tag)
698 {
699 //
700 // Allocate the pool
701 //
702 UNIMPLEMENTED;
703 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
704 }
705
706 /* EOF */