54a5fb50f96dccf9d5a3cb02acef28d9f73776bd
[reactos.git] / reactos / ntoskrnl / mm / rpoolmgr.h
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/RPoolMgr.h
* PURPOSE: A semi-generic reusable Pool implementation
7 * PROGRAMMER: Royce Mitchell III
8 * UPDATE HISTORY:
9 */
10
11 #ifndef RPOOLMGR_H
12 #define RPOOLMGR_H
13
typedef unsigned long rulong;

/* True when ptr lies within the pool's user area.
 * Fixed: the expansion is now fully parenthesized — the original leaked an
 * unparenthesized && into surrounding expressions, which mis-binds when the
 * macro is combined with ||, ?: or comparisons at the call site. */
#define R_IS_POOL_PTR(pool,ptr) ((void*)(ptr) >= (pool)->UserBase && (ULONG_PTR)(ptr) < ((ULONG_PTR)(pool)->UserBase + (pool)->UserSize))
#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
#define R_ASSERT_SIZE(pool,sz) ASSERT( (sz) > (sizeof(R_USED)+2*R_RZ) && (sz) >= sizeof(R_FREE) && (sz) < (pool)->UserSize )
19
#ifndef R_ROUND_UP
// round x up to the nearest multiple of s (s must be a power of two)
#define R_ROUND_UP(x,s) ((PVOID)(((ULONG_PTR)(x)+(s)-1) & ~((ULONG_PTR)(s)-1)))
#endif//R_ROUND_UP

#ifndef R_ROUND_DOWN
// round x down to the nearest multiple of s (s must be a power of two)
#define R_ROUND_DOWN(x,s) ((PVOID)(((ULONG_PTR)(x)) & ~((ULONG_PTR)(s)-1)))
#endif//R_ROUND_DOWN

#ifndef R_QUEMIN
// R_QUEMIN is the minimum number of entries to keep in a que
#define R_QUEMIN 0
#endif//R_QUEMIN

#ifndef R_QUECOUNT
// number of size buckets: 16, 32, 64, 128, 256, 512
#define R_QUECOUNT 6
#endif//R_QUECOUNT

#ifndef R_RZ
// R_RZ is the redzone size: bytes stamped before and after each user area
// to detect buffer under/overruns
#define R_RZ 4
#endif//R_RZ

#ifndef R_RZ_LOVALUE
// fill byte for the low-side (before-body) redzone
#define R_RZ_LOVALUE 0x87
#endif//R_RZ_LOVALUE

#ifndef R_RZ_HIVALUE
// fill byte for the high-side (after-body) redzone
#define R_RZ_HIVALUE 0xA5
#endif//R_RZ_HIVALUE

#ifndef R_STACK
// R_STACK is the number of stack entries to store in blocks for debug purposes
#define R_STACK 6
#else // R_STACK
#if R_STACK > 0 && R_STACK < 6
/* Increase the frame depth to get a reasonable back trace */
#undef R_STACK
#define R_STACK 6
#endif // R_STACK > 0 && R_STACK < 6
#endif//R_STACK

#ifndef R_TAG
// R_TAG do we keep track of tags on a per-memory block basis?
#define R_TAG 0
#endif//R_TAG

#ifdef R_MAGIC
// sentinel values stamped into block headers when R_MAGIC is defined
# ifndef R_FREE_MAGIC
# define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
# endif//R_FREE_MAGIC
# ifndef R_USED_MAGIC
# define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
# endif//R_USED_MAGIC
#endif//R_MAGIC
75
76 // **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
77 // in both the R_FREE and R_USED structures
78
/*
 * Header of a free (or que-cached) region; lives at the start of the region
 * itself. **IMPORTANT** Magic, PrevSize, Size and Status must stay at the
 * same offsets as in R_USED — the code casts freely between the two.
 */
typedef struct _R_FREE
{
#ifdef R_MAGIC
    rulong FreeMagic; // R_FREE_MAGIC sentinel
#endif//R_MAGIC
    rulong PrevSize : 30; // total size of the physically preceding block (0 = first block)
    rulong Status : 2; // 0 = free, 1 = allocated, 2 = cached in a que
    rulong Size; // total size of this block, header included
#if R_STACK
    ULONG_PTR LastOwnerStack[R_STACK]; // stack trace captured at last init
#endif//R_STACK
    struct _R_FREE* NextFree; // doubly-linked free-list links
    struct _R_FREE* PrevFree;
}
R_FREE, *PR_FREE;
94
/*
 * Header of an allocated block; the user body follows at
 * header + sizeof(R_USED) + R_RZ (see RHdrToBody). **IMPORTANT** Magic,
 * PrevSize, Size and Status must stay at the same offsets as in R_FREE —
 * the code casts freely between the two.
 */
typedef struct _R_USED
{
#ifdef R_MAGIC
    rulong UsedMagic; // R_USED_MAGIC sentinel
#endif//R_MAGIC
    rulong PrevSize : 30; // total size of the physically preceding block (0 = first block)
    rulong Status : 2; // 0 = free, 1 = allocated, 2 = cached in a que
    rulong Size; // total size of this block, header included
#if R_STACK
    ULONG_PTR LastOwnerStack[R_STACK]; // stack trace captured at last init
#endif//R_STACK
    struct _R_USED* NextUsed; // singly-linked que chain (see RQueAdd)
#if R_RZ
    rulong UserSize; // how many bytes the user actually asked for...
#endif//R_RZ
    rulong Tag; // caller-supplied 4-char tag
}
R_USED, *PR_USED;
113
/* FIFO list of cached same-bucket blocks (see RQueAdd / RQueRemove). */
typedef struct _R_QUE
{
    rulong Count;
    PR_USED First, Last;
}
R_QUE, *PR_QUE;
120
/*
 * Pool control structure; lives at the very start of the pool memory with
 * the allocatable user area (UserBase/UserSize) immediately after it.
 */
typedef struct _R_POOL
{
    void* PoolBase; // whole pool, including this header
    rulong PoolSize;
    void* UserBase; // allocatable area = PoolBase + sizeof(R_POOL)
    rulong UserSize;
    rulong Alignments[3]; // per-allocation selectable alignments
    PR_FREE FirstFree, LastFree; // doubly-linked free list
    R_QUE Que[R_QUECOUNT][3]; // [size bucket][alignment index] caches
    R_MUTEX Mutex;
}
R_POOL, *PR_POOL;
133
#if !R_STACK
/* Stack tracing disabled: compiles away to nothing. */
#define RiPrintLastOwner(Block)
#else
/*
 * Print the stack frames recorded in a block's LastOwnerStack.
 * 0xDEADBEEF entries are fill values and are skipped.
 */
static void
RiPrintLastOwner ( PR_USED Block )
{
    int i;
    for ( i = 0; i < R_STACK; i++ )
    {
        if ( Block->LastOwnerStack[i] != 0xDEADBEEF )
        {
            R_DEBUG(" ");
            /* R_PRINT_ADDRESS returning 0 means no symbol could be
             * resolved; fall back to the raw address */
            if (!R_PRINT_ADDRESS ((PVOID)Block->LastOwnerStack[i]) )
            {
                R_DEBUG("<%X>", Block->LastOwnerStack[i] );
            }
        }
    }
}
#endif//R_STACK
154
155 static int
156 RQueWhich ( rulong size )
157 {
158 rulong que, quesize;
159 for ( que=0, quesize=16; que < R_QUECOUNT; que++, quesize<<=1 )
160 {
161 if ( quesize >= size )
162 {
163 return que;
164 }
165 }
166 return -1;
167 }
168
169 static void
170 RQueInit ( PR_QUE que )
171 {
172 que->Count = 0;
173 que->First = NULL;
174 que->Last = NULL;
175 }
176
177 static void
178 RQueAdd ( PR_QUE que, PR_USED Item )
179 {
180 ASSERT(Item);
181 Item->Status = 2;
182 Item->NextUsed = NULL;
183 ++que->Count;
184 if ( !que->Last )
185 {
186 que->First = que->Last = Item;
187 return;
188 }
189 ASSERT(!que->Last->NextUsed);
190 que->Last->NextUsed = Item;
191 que->Last = Item;
192 }
193
194 static PR_USED
195 RQueRemove ( PR_QUE que )
196 {
197 PR_USED Item;
198 #if R_QUEMIN
199 if ( que->count < R_QUEMIN )
200 return NULL;
201 #endif
202 if ( !que->First )
203 return NULL;
204 Item = que->First;
205 que->First = Item->NextUsed;
206 if ( !--que->Count )
207 {
208 ASSERT ( !que->First );
209 que->Last = NULL;
210 }
211 Item->Status = 0;
212 return Item;
213 }
214
215 static void
216 RPoolAddFree ( PR_POOL pool, PR_FREE Item )
217 {
218 ASSERT(pool);
219 ASSERT(Item);
220 if ( !pool->FirstFree )
221 {
222 pool->FirstFree = pool->LastFree = Item;
223 Item->NextFree = NULL;
224 }
225 else
226 {
227 pool->FirstFree->PrevFree = Item;
228 Item->NextFree = pool->FirstFree;
229 pool->FirstFree = Item;
230 }
231 Item->PrevFree = NULL;
232 }
233
234 static void
235 RPoolRemoveFree ( PR_POOL pool, PR_FREE Item )
236 {
237 ASSERT(pool);
238 ASSERT(Item);
239 if ( Item->NextFree )
240 Item->NextFree->PrevFree = Item->PrevFree;
241 else
242 {
243 ASSERT ( pool->LastFree == Item );
244 pool->LastFree = Item->PrevFree;
245 }
246 if ( Item->PrevFree )
247 Item->PrevFree->NextFree = Item->NextFree;
248 else
249 {
250 ASSERT ( pool->FirstFree == Item );
251 pool->FirstFree = Item->NextFree;
252 }
253 #ifdef DBG
254 Item->NextFree = Item->PrevFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
255 #endif//DBG
256 }
257
#if !R_STACK
/* Stack tracing disabled: both recorders compile away. */
#define RFreeFillStack(free)
#define RUsedFillStack(used)
#else
/*
 * Capture the current call stack into a free block's LastOwnerStack,
 * skipping 3 known levels of stack trace (this helper and its callers).
 */
static void
RFreeFillStack ( PR_FREE free )
{
    int i;
    ULONG stack[R_STACK+3]; // need to skip 3 known levels of stack trace
    memset ( stack, 0xCD, sizeof(stack) );
    R_GET_STACK_FRAMES ( stack, R_STACK+3 );
    for ( i = 0; i < R_STACK; i++ )
        free->LastOwnerStack[i] = stack[i+3];
}

/*
 * Capture the current call stack into a used block's LastOwnerStack,
 * skipping 2 known levels of stack trace.
 */
static void
RUsedFillStack ( PR_USED used )
{
    int i;
    ULONG stack[R_STACK+2]; // need to skip 2 known levels of stack trace
    memset ( stack, 0xCD, sizeof(stack) );
    R_GET_STACK_FRAMES ( stack, R_STACK+2 );
    for ( i = 0; i < R_STACK; i++ )
        used->LastOwnerStack[i] = stack[i+2];
}
#endif
284
285 static PR_FREE
286 RFreeInit ( void* memory )
287 {
288 PR_FREE block = (PR_FREE)memory;
289 #if R_FREEMAGIC
290 block->FreeMagic = R_FREE_MAGIC;
291 #endif//R_FREEMAGIC
292 block->Status = 0;
293 RFreeFillStack ( block );
294 #ifdef DBG
295 block->PrevFree = block->NextFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
296 #endif//DBG
297 return block;
298 }
299
/*
 * Carve a R_POOL control structure out of the start of PoolBase and set up
 * the remainder (UserBase/UserSize) as one big free block.
 * align1..align3 are the three supported allocation alignments, selected
 * per-allocation via the 'align' index passed to RPoolAlloc.
 * Returns the pool handle (same address as PoolBase).
 * NOTE(review): pool->Mutex is not initialized here — presumably the
 * R_ACQUIRE_MUTEX provider handles setup; confirm with the including file.
 */
PR_POOL
RPoolInit ( void* PoolBase, rulong PoolSize, int align1, int align2, int align3 )
{
    int align, que;
    PR_POOL pool = (PR_POOL)PoolBase;

    pool->PoolBase = PoolBase;
    pool->PoolSize = PoolSize;
    pool->UserBase = (char*)pool->PoolBase + sizeof(R_POOL);
    pool->UserSize = PoolSize - sizeof(R_POOL);
    pool->Alignments[0] = align1;
    pool->Alignments[1] = align2;
    pool->Alignments[2] = align3;
    pool->FirstFree = pool->LastFree = NULL;

    // the whole user area starts life as a single free block
    RPoolAddFree ( pool,
        RFreeInit ( pool->UserBase ));

    pool->FirstFree->PrevSize = 0;
    pool->FirstFree->Size = pool->UserSize;

    for ( que = 0; que < R_QUECOUNT; que++ )
    {
        for ( align = 0; align < 3; align++ )
        {
            RQueInit ( &pool->Que[que][align] );
        }
    }
    return pool;
}
330
331 static const char*
332 RFormatTag ( rulong Tag, char* buf )
333 {
334 int i;
335 *(rulong*)&buf[0] = Tag;
336 buf[4] = 0;
337 for ( i = 0; i < 4; i++ )
338 {
339 if ( !buf[i] )
340 buf[i] = ' ';
341 }
342 return buf;
343 }
344
#if !R_RZ
/* Redzones disabled: the check compiles away. (The parameter names here
 * are stale relative to the real function below, but since the macro
 * discards all five arguments it still works.) */
#define RUsedRedZoneCheck(pUsed,Addr,file,line, printzone)
#else//R_RZ
/*
 * Report a corrupted or misused block and halt — R_PANIC() does not
 * return. Dumps the header fields, optionally both redzones, the recorded
 * stack frames, and a hex/ASCII dump of up to the first 128 bytes of user
 * data. Addr is the user pointer for the block.
 */
static void
RiBadBlock ( PR_USED pUsed, char* Addr, const char* violation, const char* file, int line, int printzone )
{
    char tag[5];
    unsigned int i;

    R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
        file, line, violation, Addr );

#ifdef R_MAGIC
    R_DEBUG ( "UsedMagic 0x%x, ", pUsed->UsedMagic );
#endif//R_MAGIC
    R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
        RFormatTag(pUsed->Tag,tag),
        pUsed->Tag,
        pUsed->Size,
        pUsed->UserSize );

    if ( printzone )
    {
        unsigned char* HiZone = (unsigned char*)Addr + pUsed->UserSize;
        unsigned char* LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
        R_DEBUG ( ", LoZone " );
        for ( i = 0; i < R_RZ; i++ )
            R_DEBUG ( "%02x", LoZone[i] );
        R_DEBUG ( ", HiZone " );
        for ( i = 0; i < R_RZ; i++ )
            R_DEBUG ( "%02x", HiZone[i] );
    }
    R_DEBUG ( "\n" );

    R_DEBUG ( "First few Stack Frames:" );
    RiPrintLastOwner ( pUsed );
    R_DEBUG ( "\n" );

    // hex + ASCII dump, 16 bytes per row, at most 8 rows
    R_DEBUG ( "Contents of Block:\n" );
    for ( i = 0; i < 8*16 && i < pUsed->UserSize; i += 16 )
    {
        int j;
        R_DEBUG ( "%04X ", i );
        for ( j = 0; j < 16; j++ )
        {
            if ( i+j < pUsed->UserSize )
            {
                R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr[i+j] );
            }
            else
            {
                R_DEBUG ( "   " );
            }
        }
        R_DEBUG(" ");
        for ( j = 0; j < 16; j++ )
        {
            if ( i+j < pUsed->UserSize )
            {
                char c = Addr[i+j];
                if ( c < 0x20 || c > 0x7E )
                    c = '.';
                R_DEBUG ( "%c", c );
            }
            else
            {
                R_DEBUG ( " " );
            }
        }
        R_DEBUG("\n");
    }
    R_PANIC();
}
418 static void
419 RUsedRedZoneCheck ( PR_POOL pool, PR_USED pUsed, char* Addr, const char* file, int line )
420 {
421 int i;
422 unsigned char *LoZone, *HiZone;
423 int bLow = 1;
424 int bHigh = 1;
425
426 ASSERT ( Addr >= (char*)pool->UserBase && Addr < ((char*)pool->UserBase + pool->UserSize - 16) );
427 #ifdef R_MAGIC
428 if ( pUsed->UsedMagic == MM_PPOOL_FREEMAGIC )
429 {
430 pUsed->UserSize = 0; // just to keep from confusion, MmpBadBlock() doesn't return...
431 RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
432 }
433 if ( pUsed->UsedMagic != MM_PPOOL_USEDMAGIC )
434 {
435 RiBadBlock ( pUsed, Addr, "bad magic", file, line, 0 );
436 }
437 #endif//R_MAGIC
438 switch ( pUsed->Status )
439 {
440 case 0: // freed into main pool
441 case 2: // in ques
442 RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
443 // no need for break here - RiBadBlock doesn't return
444 case 1: // allocated - this is okay
445 break;
446 default:
447 RiBadBlock ( pUsed, Addr, "corrupt status", file, line, 0 );
448 }
449 if ( pUsed->Status != 1 )
450 {
451 RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
452 }
453 if ( pUsed->Size > pool->PoolSize || pUsed->Size == 0 )
454 {
455 RiBadBlock ( pUsed, Addr, "invalid size", file, line, 0 );
456 }
457 if ( pUsed->UserSize > pool->PoolSize || pUsed->UserSize == 0 )
458 {
459 RiBadBlock ( pUsed, Addr, "invalid user size", file, line, 0 );
460 }
461 HiZone = (unsigned char*)Addr + pUsed->UserSize;
462 LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
463 for ( i = 0; i < R_RZ && bLow && bHigh; i++ )
464 {
465 bLow = bLow && ( LoZone[i] == R_RZ_LOVALUE );
466 bHigh = bHigh && ( HiZone[i] == R_RZ_HIVALUE );
467 }
468 if ( !bLow || !bHigh )
469 {
470 const char* violation = "High and Low-side redzone overwrite";
471 if ( bHigh ) // high is okay, so it was just low failed
472 violation = "Low-side redzone overwrite";
473 else if ( bLow ) // low side is okay, so it was just high failed
474 violation = "High-side redzone overwrite";
475 RiBadBlock ( pUsed, Addr, violation, file, line, 1 );
476 }
477 }
478 #endif//R_RZ
479
480 PR_FREE
481 RPreviousBlock ( PR_FREE Block )
482 {
483 if ( Block->PrevSize > 0 )
484 return (PR_FREE)( (char*)Block - Block->PrevSize );
485 return NULL;
486 }
487
488 PR_FREE
489 RNextBlock ( PR_POOL pool, PR_FREE Block )
490 {
491 PR_FREE NextBlock = (PR_FREE)( (char*)Block + Block->Size );
492 if ( (char*)NextBlock >= (char*)pool->UserBase + pool->UserSize )
493 NextBlock = NULL;
494 return NextBlock;
495 }
496
497 inline static void*
498 RHdrToBody ( void* blk )
499 /*
500 * FUNCTION: Translate a block header address to the corresponding block
501 * address (internal)
502 */
503 {
504 return ( (void *) ((char*)blk + sizeof(R_USED) + R_RZ) );
505 }
506
507 inline static PR_USED
508 RBodyToHdr ( void* addr )
509 {
510 return (PR_USED)
511 ( ((char*)addr) - sizeof(R_USED) - R_RZ );
512 }
513
514 static int
515 RiInFreeChain ( PR_POOL pool, PR_FREE Block )
516 {
517 PR_FREE Free;
518 Free = pool->FirstFree;
519 if ( Free == Block )
520 return 1;
521 while ( Free != Block )
522 {
523 Free = Free->NextFree;
524 if ( !Free )
525 return 0;
526 }
527 return 1;
528 }
529
/*
 * Full-pool consistency check (debug aid): walks every block by
 * size-jumping, validates each according to its Status, then verifies that
 * every entry of the free list is reachable by that same physical walk.
 */
static void
RPoolRedZoneCheck ( PR_POOL pool, const char* file, int line )
{
    {
        PR_USED Block = (PR_USED)pool->UserBase;
        PR_USED NextBlock;

        for ( ;; )
        {
            switch ( Block->Status )
            {
            case 0: // block is in chain
                ASSERT ( RiInFreeChain ( pool, (PR_FREE)Block ) );
                break;
            case 1: // block is allocated
                RUsedRedZoneCheck ( pool, Block, RHdrToBody(Block), file, line );
                break;
            case 2: // block is in que
                // nothing to verify here yet
                break;
            default:
                ASSERT ( !"invalid status in memory block found in pool!" );
            }
            NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
            if ( !NextBlock )
                break;
            // physical-adjacency invariant: neighbor's PrevSize mirrors Size
            ASSERT ( NextBlock->PrevSize == Block->Size );
            Block = NextBlock;
        }
    }
    {
        // now let's step through the list of free pointers and verify
        // each one can be found by size-jumping...
        PR_FREE Free = (PR_FREE)pool->FirstFree;
        while ( Free )
        {
            PR_FREE NextFree = (PR_FREE)pool->UserBase;
            if ( Free != NextFree )
            {
                while ( NextFree != Free )
                {
                    NextFree = RNextBlock ( pool, NextFree );
                    ASSERT(NextFree);
                }
            }
            Free = Free->NextFree;
        }
    }
}
579
580 static void
581 RSetSize ( PR_POOL pool, PR_FREE Block, rulong NewSize, PR_FREE NextBlock )
582 {
583 R_ASSERT_PTR(pool,Block);
584 ASSERT ( NewSize < pool->UserSize );
585 ASSERT ( NewSize >= sizeof(R_FREE) );
586 Block->Size = NewSize;
587 if ( !NextBlock )
588 NextBlock = RNextBlock ( pool, Block );
589 if ( NextBlock )
590 NextBlock->PrevSize = NewSize;
591 }
592
/*
 * Split a free block in two: Block keeps the first NewSize bytes and the
 * remainder becomes a fresh free block, which is initialized and pushed
 * onto the free list. Returns the new (second) block.
 * NOTE: the tail is resized first, while Block->Size still spans the whole
 * original region, so RSetSize's size-jump lands on the true next block —
 * do not reorder these two calls.
 */
static PR_FREE
RFreeSplit ( PR_POOL pool, PR_FREE Block, rulong NewSize )
{
    PR_FREE NewBlock = (PR_FREE)((char*)Block + NewSize);
    RSetSize ( pool, NewBlock, Block->Size - NewSize, NULL );
    RSetSize ( pool, Block, NewSize, NewBlock );
    RFreeInit ( NewBlock );
    RPoolAddFree ( pool, NewBlock );
    return NewBlock;
}
603
/*
 * Coalesce two physically adjacent free blocks into First: Second is
 * unlinked from the free list and absorbed into First's size.
 */
static void
RFreeMerge ( PR_POOL pool, PR_FREE First, PR_FREE Second )
{
    ASSERT ( RPreviousBlock(Second) == First );
    ASSERT ( First->Size == Second->PrevSize );
    RPoolRemoveFree ( pool, Second );
    RSetSize ( pool, First, First->Size + Second->Size, NULL );
}
612
/*
 * Return FreeBlock to the main free list, coalescing it with any
 * physically adjacent free neighbors (next first, then previous).
 */
static void
RPoolReclaim ( PR_POOL pool, PR_FREE FreeBlock )
{
    PR_FREE NextBlock, PreviousBlock;

    RFreeInit ( FreeBlock );
    RPoolAddFree ( pool, FreeBlock );

    // TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...

    /*
     * If the next block is immediately adjacent to the newly freed one then
     * merge them.
     * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
     */
    NextBlock = RNextBlock ( pool, FreeBlock );
    if ( NextBlock != NULL && !NextBlock->Status )
    {
        RFreeMerge ( pool, FreeBlock, NextBlock );
    }

    /*
     * If the previous block is adjacent to the newly freed one then
     * merge them.
     * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
     */
    PreviousBlock = RPreviousBlock ( FreeBlock );
    if ( PreviousBlock != NULL && !PreviousBlock->Status )
    {
        RFreeMerge ( pool, PreviousBlock, FreeBlock );
    }
}
645
646 static void
647 RiUsedInit ( PR_USED Block, rulong Tag )
648 {
649 Block->Status = 1;
650 RUsedFillStack ( Block );
651 #if R_MAGIC
652 Block->UsedMagic = R_USED_MAGIC;
653 #endif//R_MAGIC
654 //ASSERT_SIZE ( Block->Size );
655
656 // now add the block to the used block list
657 #ifdef DBG
658 Block->NextUsed = (PR_USED)(ULONG_PTR)0xDEADBEEF;
659 #endif//R_USED_LIST
660
661 Block->Tag = Tag;
662 }
663
#if !R_RZ
/* Redzones disabled: compiles away. */
#define RiUsedInitRedZone(Block,UserSize)
#else//R_RZ
/*
 * Stamp the low/high redzone patterns around the user area of a freshly
 * allocated block and record the exact requested size (UserSize), which
 * positions the high redzone for precise overrun detection.
 */
static void
RiUsedInitRedZone ( PR_USED Block, rulong UserSize )
{
    // write out buffer-overrun detection bytes
    char* Addr = (char*)RHdrToBody(Block);
    Block->UserSize = UserSize;
    memset ( Addr - R_RZ, R_RZ_LOVALUE, R_RZ );
    memset ( Addr + Block->UserSize, R_RZ_HIVALUE, R_RZ );
#ifdef DBG
    // fill the user area with a recognizable pattern to expose reads of
    // uninitialized memory
    memset ( Addr, 0xCD, UserSize );
#endif//DBG
}
#endif//R_RZ
680
/*
 * Allocate NumberOfBytes from the pool with the given tag and alignment
 * index (0..2, selecting pool->Alignments[align]). Returns the user
 * pointer, or NULL on failure.
 *
 * Debug back doors: a size of 0 triggers a full pool verification; a
 * "size" that is actually a pointer into the pool triggers a red-zone
 * check of that block. Both return NULL.
 *
 * Strategy: small requests are served from the per-size/per-alignment
 * ques when possible; otherwise a best-fit search over the free list is
 * done, retried once after flushing all ques back into the free list.
 */
static void*
RPoolAlloc ( PR_POOL pool, rulong NumberOfBytes, rulong Tag, rulong align )
{
    PR_USED NewBlock;
    PR_FREE BestBlock,
        NextBlock,
        PreviousBlock,
        BestPreviousBlock,
        CurrentBlock;
    void* BestAlignedAddr;
    int que,
        queBytes = NumberOfBytes;
    rulong BlockSize,
        Alignment;
    int que_reclaimed = 0;

    ASSERT ( pool );
    ASSERT ( align < 3 );

    R_ACQUIRE_MUTEX(pool);

    if ( !NumberOfBytes )
    {
        R_DEBUG("0 bytes requested - initiating pool verification\n");
        RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
        R_RELEASE_MUTEX(pool);
        return NULL;
    }
    if ( NumberOfBytes > pool->PoolSize )
    {
        if ( R_IS_POOL_PTR(pool,NumberOfBytes) )
        {
            R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes );
            RUsedRedZoneCheck(pool,RBodyToHdr((void*)NumberOfBytes), (char*)NumberOfBytes, __FILE__, __LINE__ );
            R_RELEASE_MUTEX(pool);
            return NULL;
        }
        R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes );
        R_RELEASE_MUTEX(pool);
        return NULL;
    }

    // fast path: try to satisfy a small request from the matching que
    que = RQueWhich ( NumberOfBytes );
    if ( que >= 0 )
    {
        if ( (NewBlock = RQueRemove ( &pool->Que[que][align] )) )
        {
            RiUsedInit ( NewBlock, Tag );
            RiUsedInitRedZone ( NewBlock, NumberOfBytes );
            R_RELEASE_MUTEX(pool);
            return RHdrToBody(NewBlock);
        }
        queBytes = 16 << que; // round the request up to the que bucket size
    }

    /*
     * Calculate the total number of bytes we will need.
     */
    BlockSize = queBytes + sizeof(R_USED) + 2*R_RZ;
    if (BlockSize < sizeof(R_FREE))
    {
        /* At least we need the size of the free block header. */
        BlockSize = sizeof(R_FREE);
    }

try_again:
    /*
     * Find the best-fitting block.
     */
    BestBlock = NULL;
    Alignment = pool->Alignments[align];
    PreviousBlock = NULL;
    /* NOTE(review): the ',' below is a long-standing typo for ';' —
     * harmless (comma operator), left untouched */
    BestPreviousBlock = NULL,
    CurrentBlock = pool->FirstFree;
    BestAlignedAddr = NULL;

    while ( CurrentBlock != NULL )
    {
        PVOID Addr = RHdrToBody(CurrentBlock);
        PVOID CurrentBlockEnd = (char*)CurrentBlock + CurrentBlock->Size;
        /* calculate last size-aligned address available within this block */
        PVOID AlignedAddr = R_ROUND_DOWN((char*)CurrentBlockEnd-queBytes-R_RZ, Alignment);
        ASSERT ( (char*)AlignedAddr+queBytes+R_RZ <= (char*)CurrentBlockEnd );

        /* special case, this address is already size-aligned, and the right size */
        if ( Addr == AlignedAddr )
        {
            BestAlignedAddr = AlignedAddr;
            BestPreviousBlock = PreviousBlock;
            BestBlock = CurrentBlock;
            break;
        }
        // if we carve out a size-aligned block... is it still past the end of this
        // block's free header?
        else if ( (char*)RBodyToHdr(AlignedAddr)
            >= (char*)CurrentBlock+sizeof(R_FREE) )
        {
            /*
             * there's enough room to allocate our size-aligned memory out
             * of this block, see if it's a better choice than any previous
             * finds
             */
            if ( BestBlock == NULL
                || BestBlock->Size > CurrentBlock->Size )
            {
                BestAlignedAddr = AlignedAddr;
                BestPreviousBlock = PreviousBlock;
                BestBlock = CurrentBlock;
            }
        }

        PreviousBlock = CurrentBlock;
        CurrentBlock = CurrentBlock->NextFree;
    }

    /*
     * We didn't find anything suitable at all.
     */
    if (BestBlock == NULL)
    {
        if ( !que_reclaimed )
        {
            // reclaim que: flush every cached block back into the free
            // list, then retry the best-fit search exactly once
            int i, j;
            for ( i = 0; i < R_QUECOUNT; i++ )
            {
                for ( j = 0; j < 3; j++ )
                {
                    while ( (BestBlock = (PR_FREE)RQueRemove ( &pool->Que[i][j] )) )
                    {
                        RPoolReclaim ( pool, BestBlock );
                    }
                }
            }

            que_reclaimed = 1;
            goto try_again;
        }
        DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
            queBytes );
        R_RELEASE_MUTEX(pool);
        return NULL;
    }
    /*
     * we found a best block. If Addr isn't already aligned, we've pre-qualified that
     * there's room at the beginning of the block for a free block...
     */
    {
        void* Addr = RHdrToBody(BestBlock);
        if ( BestAlignedAddr != Addr )
        {
            PR_FREE NewFreeBlock = RFreeSplit (
                pool,
                BestBlock,
                (char*)RBodyToHdr(BestAlignedAddr) - (char*)BestBlock );
            ASSERT ( BestAlignedAddr > Addr );

            //DPRINT ( "breaking off preceding bytes into their own block...\n" );
            /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
                NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/

            /* we want the following code to use our size-aligned block */
            BestPreviousBlock = BestBlock;
            BestBlock = NewFreeBlock;

            //VerifyPagedPool();
        }
    }
    /*
     * Is there enough space to create a second block from the unused portion.
     */
    if ( (BestBlock->Size - BlockSize) > sizeof(R_FREE) )
    {
        /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
            BestBlock, BestBlock->Size, BlockSize, NewSize );*/

        /*
         * Create the new free block.
         */
        NextBlock = RFreeSplit ( pool, BestBlock, BlockSize );
        //ASSERT_SIZE ( NextBlock->Size );
    }
    /*
     * Remove the selected block from the list of free blocks.
     */
    //DPRINT ( "Removing selected block from free block list\n" );
    RPoolRemoveFree ( pool, BestBlock );
    /*
     * Create the new used block header.
     */
    NewBlock = (PR_USED)BestBlock;
    RiUsedInit ( NewBlock, Tag );

    /* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/

    RiUsedInitRedZone ( NewBlock, NumberOfBytes );
    R_RELEASE_MUTEX(pool);

    return RHdrToBody(NewBlock);
}
881
882 static void
883 RPoolFree ( PR_POOL pool, void* Addr )
884 {
885 PR_USED UsedBlock;
886 rulong UsedSize;
887 PR_FREE FreeBlock;
888 rulong UserSize;
889 int que;
890
891 ASSERT(pool);
892 if ( !Addr )
893 {
894 R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
895 R_ACQUIRE_MUTEX(pool);
896 RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
897 R_RELEASE_MUTEX(pool);
898 return;
899 }
900 R_ASSERT_PTR(pool,Addr);
901
902 UsedBlock = RBodyToHdr(Addr);
903 UsedSize = UsedBlock->Size;
904 FreeBlock = (PR_FREE)UsedBlock;
905 #if R_RZ
906 UserSize = UsedBlock->UserSize;
907 #else
908 UserSize = UsedSize - sizeof(R_USED) - 2*R_RZ;
909 #endif//R_RZ
910
911 RUsedRedZoneCheck ( pool, UsedBlock, Addr, __FILE__, __LINE__ );
912
913 #if R_RZ
914 memset ( Addr, 0xCD, UsedBlock->UserSize );
915 #endif
916
917 que = RQueWhich ( UserSize );
918 if ( que >= 0 )
919 {
920 int queBytes = 16 << que;
921 ASSERT( (rulong)queBytes >= UserSize );
922 if ( que >= 0 )
923 {
924 int align = 0;
925 if ( R_ROUND_UP(Addr,pool->Alignments[2]) == Addr )
926 align = 2;
927 else if ( R_ROUND_UP(Addr,pool->Alignments[1]) == Addr )
928 align = 1;
929 R_ACQUIRE_MUTEX(pool);
930 RQueAdd ( &pool->Que[que][align], UsedBlock );
931 R_RELEASE_MUTEX(pool);
932 return;
933 }
934 }
935
936 R_ACQUIRE_MUTEX(pool);
937 RPoolReclaim ( pool, FreeBlock );
938 R_RELEASE_MUTEX(pool);
939 }
940
#if 0
/*
 * (Currently compiled out.) Walk the whole pool and print every allocated
 * block carrying the given tag, then the total count.
 */
static void
RPoolDumpByTag ( PR_POOL pool, rulong Tag )
{
    PR_USED Block = (PR_USED)pool->UserBase;
    PR_USED NextBlock;
    int count = 0;
    char tag[5];

    // TODO FIXME - should we validate params or ASSERT_IRQL?
    R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag,tag) );
    R_DEBUG ( " -BLOCK-- --SIZE--\n" );

    R_ACQUIRE_MUTEX(pool);
    for ( ;; )
    {
        if ( Block->Status == 1 && Block->Tag == Tag )
        {
            R_DEBUG ( " %08X %08X\n", Block, Block->Size );
            ++count;
        }
        NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
        if ( !NextBlock )
            break;
        ASSERT ( NextBlock->PrevSize == Block->Size );
        Block = NextBlock;
    }
    R_RELEASE_MUTEX(pool);

    R_DEBUG ( "Entries found for tag '%s': %i\n", tag, count );
}
#endif
973
974 rulong
975 RPoolQueryTag ( void* Addr )
976 {
977 PR_USED Block = RBodyToHdr(Addr);
978 // TODO FIXME - should we validate params?
979 #if R_MAGIC
980 if ( Block->UsedMagic != R_USED_MAGIC )
981 return 0xDEADBEEF;
982 #endif//R_MAGIC
983 if ( Block->Status != 1 )
984 return 0xDEADBEEF;
985 return Block->Tag;
986 }
987
988 void
989 RPoolStats ( PR_POOL pool )
990 {
991 int free=0, used=0, qued=0;
992 PR_USED Block = (PR_USED)pool->UserBase;
993
994 R_ACQUIRE_MUTEX(pool);
995 while ( Block )
996 {
997 switch ( Block->Status )
998 {
999 case 0:
1000 ++free;
1001 break;
1002 case 1:
1003 ++used;
1004 break;
1005 case 2:
1006 ++qued;
1007 break;
1008 default:
1009 ASSERT ( !"Invalid Status for Block in pool!" );
1010 }
1011 Block = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
1012 }
1013 R_RELEASE_MUTEX(pool);
1014
1015 R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free, used, qued, (free+used+qued) );
1016 }
1017
1018 #ifdef R_LARGEST_ALLOC_POSSIBLE
1019 static rulong
1020 RPoolLargestAllocPossible ( PR_POOL pool, int align )
1021 {
1022 int Alignment = pool->Alignments[align];
1023 rulong LargestUserSize = 0;
1024 PR_FREE Block = (PR_FREE)pool->UserBase;
1025 while ( Block )
1026 {
1027 if ( Block->Status != 1 )
1028 {
1029 void* Addr, *AlignedAddr;
1030 rulong BlockMaxUserSize;
1031 int cue, cueBytes;
1032
1033 Addr = (char*)Block + sizeof(R_USED) + R_RZ;
1034 AlignedAddr = R_ROUND_UP(Addr,Alignment);
1035 if ( Addr != AlignedAddr )
1036 Addr = R_ROUND_UP((char*)Block + sizeof(R_FREE) + sizeof(R_USED) + R_RZ, Alignment );
1037 BlockMaxUserSize = (char*)Block + Block->Size - (char*)Addr - R_RZ;
1038 cue = RQueWhich ( BlockMaxUserSize );
1039 if ( cue >= 0 )
1040 {
1041 cueBytes = 16 << cue;
1042 if ( cueBytes > BlockMaxUserSize );
1043 {
1044 if ( !cue )
1045 BlockMaxUserSize = 0;
1046 else
1047 BlockMaxUserSize = 16 << (cue-1);
1048 }
1049 }
1050 if ( BlockMaxUserSize > LargestUserSize )
1051 LargestUserSize = BlockMaxUserSize;
1052 }
1053 Block = RNextBlock ( pool, Block );
1054 }
1055 return LargestUserSize;
1056 }
1057 #endif//R_LARGEST_ALLOC_POSSIBLE
1058
1059 #endif//RPOOLMGR_H