/* $Id$
 *
 * COPYRIGHT:  See COPYING in the top level directory
 * PROJECT:    ReactOS kernel
 * FILE:       ntoskrnl/mm/RPoolMgr.h
 * PURPOSE:    A semi-generic reusable Pool implementation
 * PROGRAMMER: Royce Mitchell III
 * UPDATE HISTORY:
 */

#ifndef RPOOLMGR_H
#define RPOOLMGR_H

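/*
 * NOTE: this header is meant to be #included from a file that provides
 * the environment hooks used below but never defined here: the R_MUTEX
 * type, R_ACQUIRE_MUTEX()/R_RELEASE_MUTEX(), R_DEBUG(), R_PRINT_ADDRESS(),
 * R_PANIC() and R_GET_STACK_FRAMES(), plus the usual kernel definitions
 * (ASSERT, ULONG_PTR, memset, ...).
 */
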
typedef unsigned long rulong;

#define R_IS_POOL_PTR(pool,ptr) ((void*)(ptr) >= (pool)->UserBase && (ULONG_PTR)(ptr) < ((ULONG_PTR)(pool)->UserBase + (pool)->UserSize))
#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
#define R_ASSERT_SIZE(pool,sz) ASSERT( sz > (sizeof(R_USED)+2*R_RZ) && sz >= sizeof(R_FREE) && sz < pool->UserSize )

#ifndef R_ROUND_UP
#define R_ROUND_UP(x,s)   ((PVOID)(((ULONG_PTR)(x)+(s)-1) & ~((ULONG_PTR)(s)-1)))
#endif//R_ROUND_UP

#ifndef R_ROUND_DOWN
#define R_ROUND_DOWN(x,s) ((PVOID)(((ULONG_PTR)(x)) & ~((ULONG_PTR)(s)-1)))
#endif//R_ROUND_DOWN

#ifndef R_QUEMIN
// R_QUEMIN is the minimum number of entries to keep in a que
#define R_QUEMIN 0
#endif//R_QUEMIN

#ifndef R_QUECOUNT
// 16, 32, 64, 128, 256, 512
#define R_QUECOUNT 6
#endif//R_QUECOUNT

#ifndef R_RZ
// R_RZ is the redzone size
#define R_RZ 4
#endif//R_RZ

#ifndef R_RZ_LOVALUE
#define R_RZ_LOVALUE 0x87
#endif//R_RZ_LOVALUE

#ifndef R_RZ_HIVALUE
#define R_RZ_HIVALUE 0xA5
#endif//R_RZ_HIVALUE

#ifndef R_STACK
// R_STACK is the number of stack entries to store in blocks for debug purposes
#define R_STACK 6
#else // R_STACK
#if R_STACK > 0 && R_STACK < 6
/* Increase the frame depth to get a reasonable back trace */
#undef R_STACK
#define R_STACK 6
#endif // R_STACK > 0 && R_STACK < 6
#endif//R_STACK

#ifndef R_TAG
// R_TAG do we keep track of tags on a per-memory block basis?
#define R_TAG 0
#endif//R_TAG

#ifdef R_MAGIC
# ifndef R_FREE_MAGIC
#  define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
# endif//R_FREE_MAGIC
# ifndef R_USED_MAGIC
#  define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
# endif//R_USED_MAGIC
#endif//R_MAGIC

// **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
// in both the R_FREE and R_USED structures

typedef struct _R_FREE
{
#ifdef R_MAGIC
    rulong FreeMagic;
#endif//R_MAGIC
    rulong PrevSize : 30;
    rulong Status : 2;
    rulong Size;
#if R_STACK
    ULONG_PTR LastOwnerStack[R_STACK];
#endif//R_STACK
    struct _R_FREE* NextFree;
    struct _R_FREE* PrevFree;
}
R_FREE, *PR_FREE;

typedef struct _R_USED
{
#ifdef R_MAGIC
    rulong UsedMagic;
#endif//R_MAGIC
    rulong PrevSize : 30;
    rulong Status : 2;
    rulong Size;
#if R_STACK
    ULONG_PTR LastOwnerStack[R_STACK];
#endif//R_STACK
    struct _R_USED* NextUsed;
#if R_RZ
    rulong UserSize; // how many bytes the user actually asked for...
#endif//R_RZ
    rulong Tag;
}
R_USED, *PR_USED;

typedef struct _R_QUE
{
    rulong Count;
    PR_USED First, Last;
}
R_QUE, *PR_QUE;

typedef struct _R_POOL
{
    void* PoolBase;
    rulong PoolSize;
    void* UserBase;
    rulong UserSize;
    rulong Alignments[3];
    PR_FREE FirstFree, LastFree;
    R_QUE Que[R_QUECOUNT][3];
    R_MUTEX Mutex;
}
R_POOL, *PR_POOL;

#if !R_STACK
#define RiPrintLastOwner(Block)
#else
static void
RiPrintLastOwner ( PR_USED Block )
{
    int i;
    for ( i = 0; i < R_STACK; i++ )
    {
        if ( Block->LastOwnerStack[i] != 0xDEADBEEF )
        {
            R_DEBUG(" ");
            if ( !R_PRINT_ADDRESS ((PVOID)Block->LastOwnerStack[i]) )
            {
                R_DEBUG("<%X>", Block->LastOwnerStack[i] );
            }
        }
    }
}
#endif//R_STACK

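/*
 * RQueWhich: map a request size to the index of the smallest recycling
 * que able to hold it (16, 32, 64, 128, 256 or 512 bytes), or return -1
 * if the request is larger than the biggest que.
 */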
static int
RQueWhich ( rulong size )
{
    rulong que, quesize;
    for ( que=0, quesize=16; que < R_QUECOUNT; que++, quesize<<=1 )
    {
        if ( quesize >= size )
        {
            return que;
        }
    }
    return -1;
}

static void
RQueInit ( PR_QUE que )
{
    que->Count = 0;
    que->First = NULL;
    que->Last = NULL;
}

static void
RQueAdd ( PR_QUE que, PR_USED Item )
{
    ASSERT(Item);
    Item->Status = 2;
    Item->NextUsed = NULL;
    ++que->Count;
    if ( !que->Last )
    {
        que->First = que->Last = Item;
        return;
    }
    ASSERT(!que->Last->NextUsed);
    que->Last->NextUsed = Item;
    que->Last = Item;
}

static PR_USED
RQueRemove ( PR_QUE que )
{
    PR_USED Item;
#if R_QUEMIN
    if ( que->Count < R_QUEMIN )
        return NULL;
#endif
    if ( !que->First )
        return NULL;
    Item = que->First;
    que->First = Item->NextUsed;
    if ( !--que->Count )
    {
        ASSERT ( !que->First );
        que->Last = NULL;
    }
    Item->Status = 0;
    return Item;
}

static void
RPoolAddFree ( PR_POOL pool, PR_FREE Item )
{
    ASSERT(pool);
    ASSERT(Item);
    if ( !pool->FirstFree )
    {
        pool->FirstFree = pool->LastFree = Item;
        Item->NextFree = NULL;
    }
    else
    {
        pool->FirstFree->PrevFree = Item;
        Item->NextFree = pool->FirstFree;
        pool->FirstFree = Item;
    }
    Item->PrevFree = NULL;
}

static void
RPoolRemoveFree ( PR_POOL pool, PR_FREE Item )
{
    ASSERT(pool);
    ASSERT(Item);
    if ( Item->NextFree )
        Item->NextFree->PrevFree = Item->PrevFree;
    else
    {
        ASSERT ( pool->LastFree == Item );
        pool->LastFree = Item->PrevFree;
    }
    if ( Item->PrevFree )
        Item->PrevFree->NextFree = Item->NextFree;
    else
    {
        ASSERT ( pool->FirstFree == Item );
        pool->FirstFree = Item->NextFree;
    }
#ifdef DBG
    Item->NextFree = Item->PrevFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
#endif//DBG
}

#if !R_STACK
#define RFreeFillStack(free)
#define RUsedFillStack(used)
#else
static void
RFreeFillStack ( PR_FREE free )
{
    int i;
    ULONG stack[R_STACK+3]; // need to skip 3 known levels of stack trace
    memset ( stack, 0xCD, sizeof(stack) );
    R_GET_STACK_FRAMES ( stack, R_STACK+3 );
    for ( i = 0; i < R_STACK; i++ )
        free->LastOwnerStack[i] = stack[i+3];
}

static void
RUsedFillStack ( PR_USED used )
{
    int i;
    ULONG stack[R_STACK+2]; // need to skip 2 known levels of stack trace
    memset ( stack, 0xCD, sizeof(stack) );
    R_GET_STACK_FRAMES ( stack, R_STACK+2 );
    for ( i = 0; i < R_STACK; i++ )
        used->LastOwnerStack[i] = stack[i+2];
}
#endif

static PR_FREE
RFreeInit ( void* memory )
{
    PR_FREE block = (PR_FREE)memory;
#ifdef R_MAGIC
    block->FreeMagic = R_FREE_MAGIC;
#endif//R_MAGIC
    block->Status = 0;
    RFreeFillStack ( block );
#ifdef DBG
    block->PrevFree = block->NextFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
#endif//DBG
    return block;
}

PR_POOL
RPoolInit ( void* PoolBase, rulong PoolSize, int align1, int align2, int align3 )
{
    int align, que;
    PR_POOL pool = (PR_POOL)PoolBase;

    pool->PoolBase = PoolBase;
    pool->PoolSize = PoolSize;
    pool->UserBase = (char*)pool->PoolBase + sizeof(R_POOL);
    pool->UserSize = PoolSize - sizeof(R_POOL);
    pool->Alignments[0] = align1;
    pool->Alignments[1] = align2;
    pool->Alignments[2] = align3;
    pool->FirstFree = pool->LastFree = NULL;

    RPoolAddFree ( pool,
        RFreeInit ( pool->UserBase ));

    pool->FirstFree->PrevSize = 0;
    pool->FirstFree->Size = pool->UserSize;

    for ( que = 0; que < R_QUECOUNT; que++ )
    {
        for ( align = 0; align < 3; align++ )
        {
            RQueInit ( &pool->Que[que][align] );
        }
    }
    return pool;
}

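/*
 * Typical usage, as a rough sketch (the alignment values and the backing
 * memory region below are placeholders, not values mandated by this header;
 * the R_* hooks must already be defined by the including file):
 *
 *   PR_POOL pool = RPoolInit ( SomeReservedRegion, RegionSize,
 *                              4, 32, 4096 );      // three alignment choices
 *   void* p = RPoolAlloc ( pool, 100, 'looP', 0 ); // 100 bytes, arbitrary tag, alignment index 0
 *   ...
 *   RPoolFree ( pool, p );
 */
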
#if R_RZ
static const char*
RFormatTag ( rulong Tag, char* buf )
{
    int i;
    *(rulong*)&buf[0] = Tag;
    buf[4] = 0;
    for ( i = 0; i < 4; i++ )
    {
        if ( !buf[i] )
            buf[i] = ' ';
    }
    return buf;
}
#endif

#if !R_RZ
#define RUsedRedZoneCheck(pool,pUsed,Addr,file,line)
#else//R_RZ
static void
RiBadBlock ( PR_USED pUsed, char* Addr, const char* violation, const char* file, int line, int printzone )
{
    char tag[5];
    unsigned int i;

    R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
        file, line, violation, Addr );

#ifdef R_MAGIC
    R_DEBUG ( "UsedMagic 0x%x, ", pUsed->UsedMagic );
#endif//R_MAGIC
    R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
        RFormatTag(pUsed->Tag,tag),
        pUsed->Tag,
        pUsed->Size,
        pUsed->UserSize );

    if ( printzone )
    {
        unsigned char* HiZone = (unsigned char*)Addr + pUsed->UserSize;
        unsigned char* LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
        R_DEBUG ( ", LoZone " );
        for ( i = 0; i < R_RZ; i++ )
            R_DEBUG ( "%02x", LoZone[i] );
        R_DEBUG ( ", HiZone " );
        for ( i = 0; i < R_RZ; i++ )
            R_DEBUG ( "%02x", HiZone[i] );
    }
    R_DEBUG ( "\n" );

    R_DEBUG ( "First few Stack Frames:" );
    RiPrintLastOwner ( pUsed );
    R_DEBUG ( "\n" );

    R_DEBUG ( "Contents of Block:\n" );
    for ( i = 0; i < 8*16 && i < pUsed->UserSize; i += 16 )
    {
        int j;
        R_DEBUG ( "%04X ", i );
        for ( j = 0; j < 16; j++ )
        {
            if ( i+j < pUsed->UserSize )
            {
                R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr[i+j] );
            }
            else
            {
                R_DEBUG ( "   " );
            }
        }
        R_DEBUG(" ");
        for ( j = 0; j < 16; j++ )
        {
            if ( i+j < pUsed->UserSize )
            {
                char c = Addr[i+j];
                if ( c < 0x20 || c > 0x7E )
                    c = '.';
                R_DEBUG ( "%c", c );
            }
            else
            {
                R_DEBUG ( " " );
            }
        }
        R_DEBUG("\n");
    }
    R_PANIC();
}
static void
RUsedRedZoneCheck ( PR_POOL pool, PR_USED pUsed, char* Addr, const char* file, int line )
{
    int i;
    unsigned char *LoZone, *HiZone;
    int bLow = 1;
    int bHigh = 1;

    ASSERT ( Addr >= (char*)pool->UserBase && Addr < ((char*)pool->UserBase + pool->UserSize - 16) );
#ifdef R_MAGIC
    if ( pUsed->UsedMagic == R_FREE_MAGIC )
    {
        pUsed->UserSize = 0; // just to keep from confusion, RiBadBlock() doesn't return...
        RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
    }
    if ( pUsed->UsedMagic != R_USED_MAGIC )
    {
        RiBadBlock ( pUsed, Addr, "bad magic", file, line, 0 );
    }
#endif//R_MAGIC
    switch ( pUsed->Status )
    {
    case 0: // freed into main pool
    case 2: // in ques
        RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
        // no need for break here - RiBadBlock doesn't return
    case 1: // allocated - this is okay
        break;
    default:
        RiBadBlock ( pUsed, Addr, "corrupt status", file, line, 0 );
    }
    if ( pUsed->Status != 1 )
    {
        RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
    }
    if ( pUsed->Size > pool->PoolSize || pUsed->Size == 0 )
    {
        RiBadBlock ( pUsed, Addr, "invalid size", file, line, 0 );
    }
    if ( pUsed->UserSize > pool->PoolSize || pUsed->UserSize == 0 )
    {
        RiBadBlock ( pUsed, Addr, "invalid user size", file, line, 0 );
    }
    HiZone = (unsigned char*)Addr + pUsed->UserSize;
    LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
    for ( i = 0; i < R_RZ && bLow && bHigh; i++ )
    {
        bLow = bLow && ( LoZone[i] == R_RZ_LOVALUE );
        bHigh = bHigh && ( HiZone[i] == R_RZ_HIVALUE );
    }
    if ( !bLow || !bHigh )
    {
        const char* violation = "High and Low-side redzone overwrite";
        if ( bHigh ) // high is okay, so it was just low that failed
            violation = "Low-side redzone overwrite";
        else if ( bLow ) // low side is okay, so it was just high that failed
            violation = "High-side redzone overwrite";
        RiBadBlock ( pUsed, Addr, violation, file, line, 1 );
    }
}
#endif//R_RZ

PR_FREE
RPreviousBlock ( PR_FREE Block )
{
    if ( Block->PrevSize > 0 )
        return (PR_FREE)( (char*)Block - Block->PrevSize );
    return NULL;
}

PR_FREE
RNextBlock ( PR_POOL pool, PR_FREE Block )
{
    PR_FREE NextBlock = (PR_FREE)( (char*)Block + Block->Size );
    if ( (char*)NextBlock >= (char*)pool->UserBase + pool->UserSize )
        NextBlock = NULL;
    return NextBlock;
}

static __inline void*
RHdrToBody ( void* blk )
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
    return ( (void *) ((char*)blk + sizeof(R_USED) + R_RZ) );
}

static __inline PR_USED
RBodyToHdr ( void* addr )
{
    return (PR_USED)
        ( ((char*)addr) - sizeof(R_USED) - R_RZ );
}

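/*
 * In-memory layout of an allocated block (when R_RZ > 0):
 *
 *   [R_USED header][low redzone: R_RZ bytes of R_RZ_LOVALUE][user data][high redzone: R_RZ bytes of R_RZ_HIVALUE]
 *    ^ RBodyToHdr(Addr)                                      ^ Addr handed to the caller
 */
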
static int
RiInFreeChain ( PR_POOL pool, PR_FREE Block )
{
    PR_FREE Free;
    Free = pool->FirstFree;
    if ( Free == Block )
        return 1;
    while ( Free != Block )
    {
        Free = Free->NextFree;
        if ( !Free )
            return 0;
    }
    return 1;
}

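/*
 * RPoolRedZoneCheck: walk every block in address order, verifying its
 * status (free blocks must be reachable from the free list, allocated
 * blocks must pass the redzone check) and that each block's PrevSize
 * matches its predecessor's Size; then verify every entry of the free
 * list can be reached again by size-jumping from UserBase.
 */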
static void
RPoolRedZoneCheck ( PR_POOL pool, const char* file, int line )
{
    {
        PR_USED Block = (PR_USED)pool->UserBase;
        PR_USED NextBlock;

        for ( ;; )
        {
            switch ( Block->Status )
            {
            case 0: // block is in chain
                ASSERT ( RiInFreeChain ( pool, (PR_FREE)Block ) );
                break;
            case 1: // block is allocated
                RUsedRedZoneCheck ( pool, Block, RHdrToBody(Block), file, line );
                break;
            case 2: // block is in que
                // nothing to verify here yet
                break;
            default:
                ASSERT ( !"invalid status in memory block found in pool!" );
            }
            NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
            if ( !NextBlock )
                break;
            ASSERT ( NextBlock->PrevSize == Block->Size );
            Block = NextBlock;
        }
    }
    {
        // now let's step through the list of free pointers and verify
        // each one can be found by size-jumping...
        PR_FREE Free = (PR_FREE)pool->FirstFree;
        while ( Free )
        {
            PR_FREE NextFree = (PR_FREE)pool->UserBase;
            if ( Free != NextFree )
            {
                while ( NextFree != Free )
                {
                    NextFree = RNextBlock ( pool, NextFree );
                    ASSERT(NextFree);
                }
            }
            Free = Free->NextFree;
        }
    }
}

static void
RSetSize ( PR_POOL pool, PR_FREE Block, rulong NewSize, PR_FREE NextBlock )
{
    R_ASSERT_PTR(pool,Block);
    ASSERT ( NewSize < pool->UserSize );
    ASSERT ( NewSize >= sizeof(R_FREE) );
    Block->Size = NewSize;
    if ( !NextBlock )
        NextBlock = RNextBlock ( pool, Block );
    if ( NextBlock )
        NextBlock->PrevSize = NewSize;
}

static PR_FREE
RFreeSplit ( PR_POOL pool, PR_FREE Block, rulong NewSize )
{
    PR_FREE NewBlock = (PR_FREE)((char*)Block + NewSize);
    RSetSize ( pool, NewBlock, Block->Size - NewSize, NULL );
    RSetSize ( pool, Block, NewSize, NewBlock );
    RFreeInit ( NewBlock );
    RPoolAddFree ( pool, NewBlock );
    return NewBlock;
}

static void
RFreeMerge ( PR_POOL pool, PR_FREE First, PR_FREE Second )
{
    ASSERT ( RPreviousBlock(Second) == First );
    ASSERT ( First->Size == Second->PrevSize );
    RPoolRemoveFree ( pool, Second );
    RSetSize ( pool, First, First->Size + Second->Size, NULL );
}

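/*
 * RPoolReclaim: return a block to the main pool - reinitialize it as a
 * free block, insert it at the head of the free list, and merge it with
 * its immediate neighbors if they are free as well.
 */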
static void
RPoolReclaim ( PR_POOL pool, PR_FREE FreeBlock )
{
    PR_FREE NextBlock, PreviousBlock;

    RFreeInit ( FreeBlock );
    RPoolAddFree ( pool, FreeBlock );

    // TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...

    /*
     * If the next block is immediately adjacent to the newly freed one then
     * merge them.
     * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
     */
    NextBlock = RNextBlock ( pool, FreeBlock );
    if ( NextBlock != NULL && !NextBlock->Status )
    {
        RFreeMerge ( pool, FreeBlock, NextBlock );
    }

    /*
     * If the previous block is adjacent to the newly freed one then
     * merge them.
     * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
     */
    PreviousBlock = RPreviousBlock ( FreeBlock );
    if ( PreviousBlock != NULL && !PreviousBlock->Status )
    {
        RFreeMerge ( pool, PreviousBlock, FreeBlock );
    }
}

static void
RiUsedInit ( PR_USED Block, rulong Tag )
{
    Block->Status = 1;
    RUsedFillStack ( Block );
#ifdef R_MAGIC
    Block->UsedMagic = R_USED_MAGIC;
#endif//R_MAGIC
    //ASSERT_SIZE ( Block->Size );

    // now add the block to the used block list
#ifdef DBG
    Block->NextUsed = (PR_USED)(ULONG_PTR)0xDEADBEEF;
#endif//DBG

    Block->Tag = Tag;
}

#if !R_RZ
#define RiUsedInitRedZone(Block,UserSize)
#else//R_RZ
static void
RiUsedInitRedZone ( PR_USED Block, rulong UserSize )
{
    // write out buffer-overrun detection bytes
    char* Addr = (char*)RHdrToBody(Block);
    Block->UserSize = UserSize;
    memset ( Addr - R_RZ, R_RZ_LOVALUE, R_RZ );
    memset ( Addr + Block->UserSize, R_RZ_HIVALUE, R_RZ );
#ifdef DBG
    memset ( Addr, 0xCD, UserSize );
#endif//DBG
}
#endif//R_RZ

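/*
 * RPoolAlloc: first try to recycle a block from the matching que; failing
 * that, do a best-fit search of the free list that honours the requested
 * alignment, flushing the ques back into the pool and retrying once if
 * nothing fits. Two debugging back-doors are built in: a request for 0
 * bytes triggers a full red-zone check of the pool, and passing a pool
 * pointer as the size triggers a red-zone check of that single block.
 */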
static void*
RPoolAlloc ( PR_POOL pool, rulong NumberOfBytes, rulong Tag, rulong align )
{
    PR_USED NewBlock;
    PR_FREE BestBlock,
        NextBlock,
        PreviousBlock,
        BestPreviousBlock,
        CurrentBlock;
    void* BestAlignedAddr;
    int que,
        queBytes = NumberOfBytes;
    rulong BlockSize,
        Alignment;
    int que_reclaimed = 0;

    ASSERT ( pool );
    ASSERT ( align < 3 );

    R_ACQUIRE_MUTEX(pool);

    if ( !NumberOfBytes )
    {
        R_DEBUG("0 bytes requested - initiating pool verification\n");
        RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
        R_RELEASE_MUTEX(pool);
        return NULL;
    }
    if ( NumberOfBytes > pool->PoolSize )
    {
        if ( R_IS_POOL_PTR(pool,NumberOfBytes) )
        {
            R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes );
            RUsedRedZoneCheck(pool,RBodyToHdr((void*)NumberOfBytes), (char*)NumberOfBytes, __FILE__, __LINE__ );
            R_RELEASE_MUTEX(pool);
            return NULL;
        }
        R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes );
        R_RELEASE_MUTEX(pool);
        return NULL;
    }

    que = RQueWhich ( NumberOfBytes );
    if ( que >= 0 )
    {
        if ( (NewBlock = RQueRemove ( &pool->Que[que][align] )) )
        {
            RiUsedInit ( NewBlock, Tag );
            RiUsedInitRedZone ( NewBlock, NumberOfBytes );
            R_RELEASE_MUTEX(pool);
            return RHdrToBody(NewBlock);
        }
        queBytes = 16 << que;
    }

    /*
     * Calculate the total number of bytes we will need.
     */
    BlockSize = queBytes + sizeof(R_USED) + 2*R_RZ;
    if (BlockSize < sizeof(R_FREE))
    {
        /* At least we need the size of the free block header. */
        BlockSize = sizeof(R_FREE);
    }

try_again:
    /*
     * Find the best-fitting block.
     */
    BestBlock = NULL;
    Alignment = pool->Alignments[align];
    PreviousBlock = NULL;
    BestPreviousBlock = NULL;
    CurrentBlock = pool->FirstFree;
    BestAlignedAddr = NULL;

    while ( CurrentBlock != NULL )
    {
        PVOID Addr = RHdrToBody(CurrentBlock);
        PVOID CurrentBlockEnd = (char*)CurrentBlock + CurrentBlock->Size;
        /* calculate last size-aligned address available within this block */
        PVOID AlignedAddr = R_ROUND_DOWN((char*)CurrentBlockEnd-queBytes-R_RZ, Alignment);
        ASSERT ( (char*)AlignedAddr+queBytes+R_RZ <= (char*)CurrentBlockEnd );

        /* special case, this address is already size-aligned, and the right size */
        if ( Addr == AlignedAddr )
        {
            BestAlignedAddr = AlignedAddr;
            BestPreviousBlock = PreviousBlock;
            BestBlock = CurrentBlock;
            break;
        }
        // if we carve out a size-aligned block... is it still past the end of this
        // block's free header?
        else if ( (char*)RBodyToHdr(AlignedAddr)
                  >= (char*)CurrentBlock+sizeof(R_FREE) )
        {
            /*
             * there's enough room to allocate our size-aligned memory out
             * of this block, see if it's a better choice than any previous
             * finds
             */
            if ( BestBlock == NULL
                 || BestBlock->Size > CurrentBlock->Size )
            {
                BestAlignedAddr = AlignedAddr;
                BestPreviousBlock = PreviousBlock;
                BestBlock = CurrentBlock;
            }
        }

        PreviousBlock = CurrentBlock;
        CurrentBlock = CurrentBlock->NextFree;
    }

    /*
     * We didn't find anything suitable at all.
     */
    if (BestBlock == NULL)
    {
        if ( !que_reclaimed )
        {
            // reclaim que
            int i, j;
            for ( i = 0; i < R_QUECOUNT; i++ )
            {
                for ( j = 0; j < 3; j++ )
                {
                    while ( (BestBlock = (PR_FREE)RQueRemove ( &pool->Que[i][j] )) )
                    {
                        RPoolReclaim ( pool, BestBlock );
                    }
                }
            }

            que_reclaimed = 1;
            goto try_again;
        }
        DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
            queBytes );
        R_RELEASE_MUTEX(pool);
        return NULL;
    }
    /*
     * we found a best block. If Addr isn't already aligned, we've pre-qualified that
     * there's room at the beginning of the block for a free block...
     */
    {
        void* Addr = RHdrToBody(BestBlock);
        if ( BestAlignedAddr != Addr )
        {
            PR_FREE NewFreeBlock = RFreeSplit (
                pool,
                BestBlock,
                (char*)RBodyToHdr(BestAlignedAddr) - (char*)BestBlock );
            ASSERT ( BestAlignedAddr > Addr );

            //DPRINT ( "breaking off preceding bytes into their own block...\n" );
            /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
                NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/

            /* we want the following code to use our size-aligned block */
            BestPreviousBlock = BestBlock;
            BestBlock = NewFreeBlock;

            //VerifyPagedPool();
        }
    }
    /*
     * Is there enough space to create a second block from the unused portion.
     */
    if ( (BestBlock->Size - BlockSize) > sizeof(R_FREE) )
    {
        /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
            BestBlock, BestBlock->Size, BlockSize, NewSize );*/

        /*
         * Create the new free block.
         */
        NextBlock = RFreeSplit ( pool, BestBlock, BlockSize );
        //ASSERT_SIZE ( NextBlock->Size );
    }
    /*
     * Remove the selected block from the list of free blocks.
     */
    //DPRINT ( "Removing selected block from free block list\n" );
    RPoolRemoveFree ( pool, BestBlock );
    /*
     * Create the new used block header.
     */
    NewBlock = (PR_USED)BestBlock;
    RiUsedInit ( NewBlock, Tag );

    /* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/

    RiUsedInitRedZone ( NewBlock, NumberOfBytes );
    R_RELEASE_MUTEX(pool);

    return RHdrToBody(NewBlock);
}

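/*
 * RPoolFree: validate the block (red zones, magic, status), then either
 * park it on the recycling que that matches its user size or give it back
 * to the main pool via RPoolReclaim. Freeing NULL triggers a full
 * red-zone check of the pool instead.
 */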
static void
RPoolFree ( PR_POOL pool, void* Addr )
{
    PR_USED UsedBlock;
    rulong UsedSize;
    PR_FREE FreeBlock;
    rulong UserSize;
    int que;

    ASSERT(pool);
    if ( !Addr )
    {
        R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
        R_ACQUIRE_MUTEX(pool);
        RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
        R_RELEASE_MUTEX(pool);
        return;
    }
    R_ASSERT_PTR(pool,Addr);

    UsedBlock = RBodyToHdr(Addr);
    UsedSize = UsedBlock->Size;
    FreeBlock = (PR_FREE)UsedBlock;
#if R_RZ
    UserSize = UsedBlock->UserSize;
#else
    UserSize = UsedSize - sizeof(R_USED) - 2*R_RZ;
#endif//R_RZ

    RUsedRedZoneCheck ( pool, UsedBlock, Addr, __FILE__, __LINE__ );

#if R_RZ
    memset ( Addr, 0xCD, UsedBlock->UserSize );
#endif

    que = RQueWhich ( UserSize );
    if ( que >= 0 )
    {
        int queBytes = 16 << que;
        ASSERT( (rulong)queBytes >= UserSize );
        if ( que >= 0 )
        {
            int align = 0;
            if ( R_ROUND_UP(Addr,pool->Alignments[2]) == Addr )
                align = 2;
            else if ( R_ROUND_UP(Addr,pool->Alignments[1]) == Addr )
                align = 1;
            R_ACQUIRE_MUTEX(pool);
            RQueAdd ( &pool->Que[que][align], UsedBlock );
            R_RELEASE_MUTEX(pool);
            return;
        }
    }

    R_ACQUIRE_MUTEX(pool);
    RPoolReclaim ( pool, FreeBlock );
    R_RELEASE_MUTEX(pool);
}

#if 0
static void
RPoolDumpByTag ( PR_POOL pool, rulong Tag )
{
    PR_USED Block = (PR_USED)pool->UserBase;
    PR_USED NextBlock;
    int count = 0;
    char tag[5];

    // TODO FIXME - should we validate params or ASSERT_IRQL?
    R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag,tag) );
    R_DEBUG ( " -BLOCK-- --SIZE--\n" );

    R_ACQUIRE_MUTEX(pool);
    for ( ;; )
    {
        if ( Block->Status == 1 && Block->Tag == Tag )
        {
            R_DEBUG ( " %08X %08X\n", Block, Block->Size );
            ++count;
        }
        NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
        if ( !NextBlock )
            break;
        ASSERT ( NextBlock->PrevSize == Block->Size );
        Block = NextBlock;
    }
    R_RELEASE_MUTEX(pool);

    R_DEBUG ( "Entries found for tag '%s': %i\n", tag, count );
}
#endif

rulong
RPoolQueryTag ( void* Addr )
{
    PR_USED Block = RBodyToHdr(Addr);
    // TODO FIXME - should we validate params?
#ifdef R_MAGIC
    if ( Block->UsedMagic != R_USED_MAGIC )
        return 0xDEADBEEF;
#endif//R_MAGIC
    if ( Block->Status != 1 )
        return 0xDEADBEEF;
    return Block->Tag;
}

void
RPoolStats ( PR_POOL pool )
{
    int free=0, used=0, qued=0;
    PR_USED Block = (PR_USED)pool->UserBase;

    R_ACQUIRE_MUTEX(pool);
    while ( Block )
    {
        switch ( Block->Status )
        {
        case 0:
            ++free;
            break;
        case 1:
            ++used;
            break;
        case 2:
            ++qued;
            break;
        default:
            ASSERT ( !"Invalid Status for Block in pool!" );
        }
        Block = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
    }
    R_RELEASE_MUTEX(pool);

    R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free, used, qued, (free+used+qued) );
}

#ifdef R_LARGEST_ALLOC_POSSIBLE
static rulong
RPoolLargestAllocPossible ( PR_POOL pool, int align )
{
    int Alignment = pool->Alignments[align];
    rulong LargestUserSize = 0;
    PR_FREE Block = (PR_FREE)pool->UserBase;
    while ( Block )
    {
        if ( Block->Status != 1 )
        {
            void* Addr, *AlignedAddr;
            rulong BlockMaxUserSize;
            int cue, cueBytes;

            Addr = (char*)Block + sizeof(R_USED) + R_RZ;
            AlignedAddr = R_ROUND_UP(Addr,Alignment);
            if ( Addr != AlignedAddr )
                Addr = R_ROUND_UP((char*)Block + sizeof(R_FREE) + sizeof(R_USED) + R_RZ, Alignment );
            BlockMaxUserSize = (char*)Block + Block->Size - (char*)Addr - R_RZ;
            cue = RQueWhich ( BlockMaxUserSize );
            if ( cue >= 0 )
            {
                cueBytes = 16 << cue;
                if ( cueBytes > BlockMaxUserSize )
                {
                    if ( !cue )
                        BlockMaxUserSize = 0;
                    else
                        BlockMaxUserSize = 16 << (cue-1);
                }
            }
            if ( BlockMaxUserSize > LargestUserSize )
                LargestUserSize = BlockMaxUserSize;
        }
        Block = RNextBlock ( pool, Block );
    }
    return LargestUserSize;
}
#endif//R_LARGEST_ALLOC_POSSIBLE

#endif//RPOOLMGR_H