/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/ppool.c
 * PURPOSE:     Implements the paged pool
 *
 * PROGRAMMERS: David Welch (welch@mcmail.com)
 *              Royce Mitchell III
 */

/* INCLUDES *****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, MmInitializePagedPool)
#endif

#undef ASSERT
#define ASSERT(x) if (!(x)) {DbgPrint("Assertion "#x" failed at %s:%d\n", __FILE__,__LINE__); DbgBreakPoint(); }

// enable "magic"
//#define R_MAGIC
#define R_MUTEX FAST_MUTEX
#define R_ACQUIRE_MUTEX(pool) /*DPRINT1("Acquiring PPool Mutex\n");*/ ExAcquireFastMutex(&pool->Mutex)
#define R_RELEASE_MUTEX(pool) /*DPRINT1("Releasing PPool Mutex\n");*/ ExReleaseFastMutex(&pool->Mutex)
#define R_PRINT_ADDRESS(addr) KeRosPrintAddress(addr)
#define R_PANIC() KeBugCheck(MEMORY_MANAGEMENT)
#define R_DEBUG DbgPrint

#ifdef _ARM_
#define R_GET_STACK_FRAMES(ptr,cnt)
#else
#define R_GET_STACK_FRAMES(ptr,cnt) RtlWalkFrameChain((PVOID*)ptr,cnt, 0)
#endif

/* GLOBALS ********************************************************************/

typedef unsigned long rulong;

#define R_IS_POOL_PTR(pool,ptr) (((void*)(ULONG_PTR)(ptr) >= pool->UserBase) && ((ULONG_PTR)(ptr) < ((ULONG_PTR)pool->UserBase + pool->UserSize)))
#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
#define R_ASSERT_SIZE(pool,sz) ASSERT( sz > (sizeof(R_USED)+2*R_RZ) && sz >= sizeof(R_FREE) && sz < pool->UserSize )

#ifndef R_ROUND_UP
#define R_ROUND_UP(x,s) ((PVOID)(((ULONG_PTR)(x)+(s)-1) & ~((ULONG_PTR)(s)-1)))
#endif//R_ROUND_UP

#ifndef R_ROUND_DOWN
#define R_ROUND_DOWN(x,s) ((PVOID)(((ULONG_PTR)(x)) & ~((ULONG_PTR)(s)-1)))
#endif//R_ROUND_DOWN

#ifndef R_QUEMIN
// R_QUEMIN is the minimum number of entries to keep in a que
#define R_QUEMIN 0
#endif//R_QUEMIN

#ifndef R_QUECOUNT
// 16, 32, 64, 128, 256, 512
#define R_QUECOUNT 6
#endif//R_QUECOUNT

#ifndef R_RZ
// R_RZ is the redzone size
#define R_RZ 4
#endif//R_RZ

#ifndef R_RZ_LOVALUE
#define R_RZ_LOVALUE 0x87
#endif//R_RZ_LOVALUE

#ifndef R_RZ_HIVALUE
#define R_RZ_HIVALUE 0xA5
#endif//R_RZ_HIVALUE

#ifndef R_STACK
// R_STACK is the number of stack entries to store in blocks for debug purposes
#define R_STACK 6
#else // R_STACK
#if R_STACK > 0 && R_STACK < 6
/* Increase the frame depth to get a reasonable back trace */
#undef R_STACK
#define R_STACK 6
#endif // R_STACK > 0 && R_STACK < 6
#endif//R_STACK

#ifndef R_TAG
// R_TAG: do we keep track of tags on a per-memory-block basis?
#define R_TAG 0
#endif//R_TAG

#ifdef R_MAGIC
# ifndef R_FREE_MAGIC
# define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
# endif//R_FREE_MAGIC
# ifndef R_USED_MAGIC
# define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
# endif//R_USED_MAGIC
#endif//R_MAGIC
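
// When R_MAGIC is enabled, the two values above spell out "FrEe" and "uSeD"
// when the rulong is viewed as raw bytes on a little-endian machine, which
// makes block headers easy to spot in a memory dump.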

// **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
// in both the R_FREE and R_USED structures

typedef struct _R_FREE
{
#ifdef R_MAGIC
    rulong FreeMagic;
#endif//R_MAGIC
    rulong PrevSize : 30;
    rulong Status : 2;
    rulong Size;
#if R_STACK
    ULONG_PTR LastOwnerStack[R_STACK];
#endif//R_STACK
    struct _R_FREE* NextFree;
    struct _R_FREE* PrevFree;
}
R_FREE, *PR_FREE;

typedef struct _R_USED
{
#ifdef R_MAGIC
    rulong UsedMagic;
#endif//R_MAGIC
    rulong PrevSize : 30;
    rulong Status : 2;
    rulong Size;
#if R_STACK
    ULONG_PTR LastOwnerStack[R_STACK];
#endif//R_STACK
    struct _R_USED* NextUsed;
#if R_RZ
    rulong UserSize; // how many bytes the user actually asked for...
#endif//R_RZ
    rulong Tag;
}
R_USED, *PR_USED;

typedef struct _R_QUE
{
    rulong Count;
    PR_USED First, Last;
}
R_QUE, *PR_QUE;

typedef struct _R_POOL
{
    void* PoolBase;
    rulong PoolSize;
    void* UserBase;
    rulong UserSize;
    rulong Alignments[3];
    PR_FREE FirstFree, LastFree;
    R_QUE Que[R_QUECOUNT][3];
    R_MUTEX Mutex;
}
R_POOL, *PR_POOL;
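
/*
 * Overall pool layout (a sketch inferred from RPoolInit and RHdrToBody below):
 *
 *   PoolBase               UserBase = PoolBase + sizeof(R_POOL)
 *   +----------+----------------------------------------------------------+
 *   | R_POOL   | chain of blocks, each: [header][redzone][body][redzone]  |
 *   +----------+----------------------------------------------------------+
 *   |<--------------------------- PoolSize ----------------------------->|
 *              |<---------------------- UserSize ------------------------>|
 *
 * Blocks are walked by size: the next block starts Size bytes after the
 * current one, and PrevSize allows walking backwards.
 */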

PVOID MmPagedPoolBase;
ULONG MmPagedPoolSize;
ULONG MmTotalPagedPoolQuota = 0; // TODO FIXME: not actually used anywhere yet
static PR_POOL MmPagedPool = NULL;

/* FUNCTIONS*******************************************************************/

#if !R_STACK
#define RiPrintLastOwner(Block)
#else
static void
RiPrintLastOwner ( PR_USED Block )
{
    int i;
    for ( i = 0; i < R_STACK; i++ )
    {
        if ( Block->LastOwnerStack[i] != 0xDEADBEEF )
        {
            R_DEBUG(" ");
            //if (!R_PRINT_ADDRESS ((PVOID)Block->LastOwnerStack[i]) )
            {
                R_DEBUG("<%X>", Block->LastOwnerStack[i] );
            }
        }
    }
}
#endif//R_STACK

static int
RQueWhich ( rulong size )
{
    rulong que, quesize;
    for ( que=0, quesize=16; que < R_QUECOUNT; que++, quesize<<=1 )
    {
        if ( quesize >= size )
        {
            return que;
        }
    }
    return -1;
}
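
// Example: a request for 100 bytes lands in que 3 (the 128-byte que), since
// the ques cover 16, 32, 64, 128, 256 and 512 bytes. Requests larger than
// 512 bytes return -1 and are served from the free list only.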

static void
RQueInit ( PR_QUE que )
{
    que->Count = 0;
    que->First = NULL;
    que->Last = NULL;
}

static void
RQueAdd ( PR_QUE que, PR_USED Item )
{
    ASSERT(Item);
    Item->Status = 2;
    Item->NextUsed = NULL;
    ++que->Count;
    if ( !que->Last )
    {
        que->First = que->Last = Item;
        return;
    }
    ASSERT(!que->Last->NextUsed);
    que->Last->NextUsed = Item;
    que->Last = Item;
}

static PR_USED
RQueRemove ( PR_QUE que )
{
    PR_USED Item;
#if R_QUEMIN
    if ( que->Count < R_QUEMIN )
        return NULL;
#endif
    if ( !que->First )
        return NULL;
    Item = que->First;
    que->First = Item->NextUsed;
    if ( !--que->Count )
    {
        ASSERT ( !que->First );
        que->Last = NULL;
    }
    Item->Status = 0;
    return Item;
}

static void
RPoolAddFree ( PR_POOL pool, PR_FREE Item )
{
    ASSERT(pool);
    ASSERT(Item);
    if ( !pool->FirstFree )
    {
        pool->FirstFree = pool->LastFree = Item;
        Item->NextFree = NULL;
    }
    else
    {
        pool->FirstFree->PrevFree = Item;
        Item->NextFree = pool->FirstFree;
        pool->FirstFree = Item;
    }
    Item->PrevFree = NULL;
}

static void
RPoolRemoveFree ( PR_POOL pool, PR_FREE Item )
{
    ASSERT(pool);
    ASSERT(Item);
    if ( Item->NextFree )
        Item->NextFree->PrevFree = Item->PrevFree;
    else
    {
        ASSERT ( pool->LastFree == Item );
        pool->LastFree = Item->PrevFree;
    }
    if ( Item->PrevFree )
        Item->PrevFree->NextFree = Item->NextFree;
    else
    {
        ASSERT ( pool->FirstFree == Item );
        pool->FirstFree = Item->NextFree;
    }
#if DBG
    Item->NextFree = Item->PrevFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
#endif//DBG
}

#if !R_STACK
#define RFreeFillStack(free)
#define RUsedFillStack(used)
#else
static void
RFreeFillStack ( PR_FREE free )
{
    int i;
    ULONG_PTR stack[R_STACK+3]; // need to skip 3 known levels of stack trace
    memset ( stack, 0xCD, sizeof(stack) );
    R_GET_STACK_FRAMES ( stack, R_STACK+3 );
    for ( i = 0; i < R_STACK; i++ )
        free->LastOwnerStack[i] = stack[i+3];
}

static void
RUsedFillStack ( PR_USED used )
{
    int i;
    ULONG_PTR stack[R_STACK+2]; // need to skip 2 known levels of stack trace
    memset ( stack, 0xCD, sizeof(stack) );
    R_GET_STACK_FRAMES ( stack, R_STACK+2 );
    for ( i = 0; i < R_STACK; i++ )
        used->LastOwnerStack[i] = stack[i+2];
}
#endif

static PR_FREE
RFreeInit ( void* memory )
{
    PR_FREE block = (PR_FREE)memory;
#ifdef R_MAGIC
    block->FreeMagic = R_FREE_MAGIC;
#endif//R_MAGIC
    block->Status = 0;
    RFreeFillStack ( block );
#if DBG
    block->PrevFree = block->NextFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
#endif//DBG
    return block;
}

PR_POOL
RPoolInit ( void* PoolBase, rulong PoolSize, int align1, int align2, int align3 )
{
    int align, que;
    PR_POOL pool = (PR_POOL)PoolBase;

    pool->PoolBase = PoolBase;
    pool->PoolSize = PoolSize;
    pool->UserBase = (char*)pool->PoolBase + sizeof(R_POOL);
    pool->UserSize = PoolSize - sizeof(R_POOL);
    pool->Alignments[0] = align1;
    pool->Alignments[1] = align2;
    pool->Alignments[2] = align3;
    pool->FirstFree = pool->LastFree = NULL;

    RPoolAddFree ( pool,
                   RFreeInit ( pool->UserBase ));

    pool->FirstFree->PrevSize = 0;
    pool->FirstFree->Size = pool->UserSize;

    for ( que = 0; que < R_QUECOUNT; que++ )
    {
        for ( align = 0; align < 3; align++ )
        {
            RQueInit ( &pool->Que[que][align] );
        }
    }
    return pool;
}
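
// Right after RPoolInit the entire user area is a single free block of
// UserSize bytes; allocations split pieces off of it and frees coalesce
// neighbours back together, so the sum of all block Sizes always equals
// UserSize.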

#if R_RZ
static const char*
RFormatTag ( rulong Tag, char* buf )
{
    int i;
    *(rulong*)&buf[0] = Tag;
    buf[4] = 0;
    for ( i = 0; i < 4; i++ )
    {
        if ( !buf[i] )
            buf[i] = ' ';
    }
    return buf;
}
#endif

#if !R_RZ
#define RUsedRedZoneCheck(pUsed,Addr,file,line, printzone)
#else//R_RZ
static void
RiBadBlock ( PR_USED pUsed, char* Addr, const char* violation, const char* file, int line, int printzone )
{
    char tag[5];
    unsigned int i;

    R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
            file, line, violation, Addr );

#ifdef R_MAGIC
    R_DEBUG ( "UsedMagic 0x%x, ", pUsed->UsedMagic );
#endif//R_MAGIC
    R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
              RFormatTag(pUsed->Tag,tag),
              pUsed->Tag,
              pUsed->Size,
              pUsed->UserSize );

    if ( printzone )
    {
        unsigned char* HiZone = (unsigned char*)Addr + pUsed->UserSize;
        unsigned char* LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
        R_DEBUG ( ", LoZone " );
        for ( i = 0; i < R_RZ; i++ )
            R_DEBUG ( "%02x", LoZone[i] );
        R_DEBUG ( ", HiZone " );
        for ( i = 0; i < R_RZ; i++ )
            R_DEBUG ( "%02x", HiZone[i] );
    }
    R_DEBUG ( "\n" );

    R_DEBUG ( "First few Stack Frames:" );
    RiPrintLastOwner ( pUsed );
    R_DEBUG ( "\n" );

    R_DEBUG ( "Contents of Block:\n" );
    for ( i = 0; i < 8*16 && i < pUsed->UserSize; i += 16 )
    {
        int j;
        R_DEBUG ( "%04X ", i );
        for ( j = 0; j < 16; j++ )
        {
            if ( i+j < pUsed->UserSize )
            {
                R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr[i+j] );
            }
            else
            {
                R_DEBUG ( "   " );
            }
        }
        R_DEBUG(" ");
        for ( j = 0; j < 16; j++ )
        {
            if ( i+j < pUsed->UserSize )
            {
                char c = Addr[i+j];
                if ( c < 0x20 || c > 0x7E )
                    c = '.';
                R_DEBUG ( "%c", c );
            }
            else
            {
                R_DEBUG ( " " );
            }
        }
        R_DEBUG("\n");
    }
    R_PANIC();
}
static void
RUsedRedZoneCheck ( PR_POOL pool, PR_USED pUsed, char* Addr, const char* file, int line )
{
    int i;
    unsigned char *LoZone, *HiZone;
    int bLow = 1;
    int bHigh = 1;

    ASSERT ( Addr >= (char*)pool->UserBase && Addr < ((char*)pool->UserBase + pool->UserSize - 16) );
#ifdef R_MAGIC
    if ( pUsed->UsedMagic == R_FREE_MAGIC )
    {
        pUsed->UserSize = 0; // just to avoid confusion - RiBadBlock() doesn't return...
        RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
    }
    if ( pUsed->UsedMagic != R_USED_MAGIC )
    {
        RiBadBlock ( pUsed, Addr, "bad magic", file, line, 0 );
    }
#endif//R_MAGIC
    switch ( pUsed->Status )
    {
    case 0: // freed into main pool
    case 2: // in ques
        RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
        // no need for a break here - RiBadBlock doesn't return
    case 1: // allocated - this is okay
        break;
    default:
        RiBadBlock ( pUsed, Addr, "corrupt status", file, line, 0 );
    }
    if ( pUsed->Size > pool->PoolSize || pUsed->Size == 0 )
    {
        RiBadBlock ( pUsed, Addr, "invalid size", file, line, 0 );
    }
    if ( pUsed->UserSize > pool->PoolSize || pUsed->UserSize == 0 )
    {
        RiBadBlock ( pUsed, Addr, "invalid user size", file, line, 0 );
    }
    HiZone = (unsigned char*)Addr + pUsed->UserSize;
    LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
    for ( i = 0; i < R_RZ && bLow && bHigh; i++ )
    {
        bLow = bLow && ( LoZone[i] == R_RZ_LOVALUE );
        bHigh = bHigh && ( HiZone[i] == R_RZ_HIVALUE );
    }
    if ( !bLow || !bHigh )
    {
        const char* violation = "High and Low-side redzone overwrite";
        if ( bHigh ) // high is okay, so only the low side failed
            violation = "Low-side redzone overwrite";
        else if ( bLow ) // low side is okay, so only the high side failed
            violation = "High-side redzone overwrite";
        RiBadBlock ( pUsed, Addr, violation, file, line, 1 );
    }
}
#endif//R_RZ

PR_FREE
RPreviousBlock ( PR_FREE Block )
{
    if ( Block->PrevSize > 0 )
        return (PR_FREE)( (char*)Block - Block->PrevSize );
    return NULL;
}

PR_FREE
RNextBlock ( PR_POOL pool, PR_FREE Block )
{
    PR_FREE NextBlock = (PR_FREE)( (char*)Block + Block->Size );
    if ( (char*)NextBlock >= (char*)pool->UserBase + pool->UserSize )
        NextBlock = NULL;
    return NextBlock;
}

static __inline void*
RHdrToBody ( void* blk )
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
    return ( (void *) ((char*)blk + sizeof(R_USED) + R_RZ) );
}

static __inline PR_USED
RBodyToHdr ( void* addr )
{
    return (PR_USED)
        ( ((char*)addr) - sizeof(R_USED) - R_RZ );
}
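
/*
 * Layout of a single used block, as implied by RHdrToBody/RBodyToHdr above
 * and RiUsedInitRedZone below (values shown for the default R_RZ of 4):
 *
 *   +--------+--------------+---------------------+--------------+
 *   | R_USED | lo redzone   | body (UserSize)     | hi redzone   |
 *   | header | R_RZ x 0x87  | returned to caller  | R_RZ x 0xA5  |
 *   +--------+--------------+---------------------+--------------+
 *   ^ Block                 ^ RHdrToBody(Block)
 *
 * Any slack between body+UserSize+R_RZ and the end of the block (Size bytes
 * from the header) is simply unused.
 */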

static int
RiInFreeChain ( PR_POOL pool, PR_FREE Block )
{
    PR_FREE Free;
    Free = pool->FirstFree;
    if ( Free == Block )
        return 1;
    while ( Free != Block )
    {
        Free = Free->NextFree;
        if ( !Free )
            return 0;
    }
    return 1;
}

static void
RPoolRedZoneCheck ( PR_POOL pool, const char* file, int line )
{
    {
        PR_USED Block = (PR_USED)pool->UserBase;
        PR_USED NextBlock;

        for ( ;; )
        {
            switch ( Block->Status )
            {
            case 0: // block is in chain
                ASSERT ( RiInFreeChain ( pool, (PR_FREE)Block ) );
                break;
            case 1: // block is allocated
                RUsedRedZoneCheck ( pool, Block, RHdrToBody(Block), file, line );
                break;
            case 2: // block is in que
                // nothing to verify here yet
                break;
            default:
                ASSERT ( !"invalid status in memory block found in pool!" );
            }
            NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
            if ( !NextBlock )
                break;
            ASSERT ( NextBlock->PrevSize == Block->Size );
            Block = NextBlock;
        }
    }
    {
        // now let's step through the list of free pointers and verify
        // each one can be found by size-jumping...
        PR_FREE Free = (PR_FREE)pool->FirstFree;
        while ( Free )
        {
            PR_FREE NextFree = (PR_FREE)pool->UserBase;
            if ( Free != NextFree )
            {
                while ( NextFree != Free )
                {
                    NextFree = RNextBlock ( pool, NextFree );
                    ASSERT(NextFree);
                }
            }
            Free = Free->NextFree;
        }
    }
}

static void
RSetSize ( PR_POOL pool, PR_FREE Block, rulong NewSize, PR_FREE NextBlock )
{
    R_ASSERT_PTR(pool,Block);
    ASSERT ( NewSize < pool->UserSize );
    ASSERT ( NewSize >= sizeof(R_FREE) );
    Block->Size = NewSize;
    if ( !NextBlock )
        NextBlock = RNextBlock ( pool, Block );
    if ( NextBlock )
        NextBlock->PrevSize = NewSize;
}

static PR_FREE
RFreeSplit ( PR_POOL pool, PR_FREE Block, rulong NewSize )
{
    PR_FREE NewBlock = (PR_FREE)((char*)Block + NewSize);
    RSetSize ( pool, NewBlock, Block->Size - NewSize, NULL );
    RSetSize ( pool, Block, NewSize, NewBlock );
    RFreeInit ( NewBlock );
    RPoolAddFree ( pool, NewBlock );
    return NewBlock;
}

static void
RFreeMerge ( PR_POOL pool, PR_FREE First, PR_FREE Second )
{
    ASSERT ( RPreviousBlock(Second) == First );
    ASSERT ( First->Size == Second->PrevSize );
    RPoolRemoveFree ( pool, Second );
    RSetSize ( pool, First, First->Size + Second->Size, NULL );
}

static void
RPoolReclaim ( PR_POOL pool, PR_FREE FreeBlock )
{
    PR_FREE NextBlock, PreviousBlock;

    RFreeInit ( FreeBlock );
    RPoolAddFree ( pool, FreeBlock );

    // TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...

    /*
     * If the next block is immediately adjacent to the newly freed one then
     * merge them.
     * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
     */
    NextBlock = RNextBlock ( pool, FreeBlock );
    if ( NextBlock != NULL && !NextBlock->Status )
    {
        RFreeMerge ( pool, FreeBlock, NextBlock );
    }

    /*
     * If the previous block is adjacent to the newly freed one then
     * merge them.
     * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
     */
    PreviousBlock = RPreviousBlock ( FreeBlock );
    if ( PreviousBlock != NULL && !PreviousBlock->Status )
    {
        RFreeMerge ( pool, PreviousBlock, FreeBlock );
    }
}

static void
RiUsedInit ( PR_USED Block, rulong Tag )
{
    Block->Status = 1;
    RUsedFillStack ( Block );
#ifdef R_MAGIC
    Block->UsedMagic = R_USED_MAGIC;
#endif//R_MAGIC
    //ASSERT_SIZE ( Block->Size );

    // poison the used-list link - no global used-block list is maintained
#if DBG
    Block->NextUsed = (PR_USED)(ULONG_PTR)0xDEADBEEF;
#endif//DBG

    Block->Tag = Tag;
}

#if !R_RZ
#define RiUsedInitRedZone(Block,UserSize)
#else//R_RZ
static void
RiUsedInitRedZone ( PR_USED Block, rulong UserSize )
{
    // write out buffer-overrun detection bytes
    char* Addr = (char*)RHdrToBody(Block);
    Block->UserSize = UserSize;
    memset ( Addr - R_RZ, R_RZ_LOVALUE, R_RZ );
    memset ( Addr + Block->UserSize, R_RZ_HIVALUE, R_RZ );
#if DBG
    memset ( Addr, 0xCD, UserSize );
#endif//DBG
}
#endif//R_RZ
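
// Worked example: for a 24-byte request with the default R_RZ of 4, the four
// bytes just below the body are set to 0x87 (R_RZ_LOVALUE), the four bytes
// starting at body+24 are set to 0xA5 (R_RZ_HIVALUE), and on DBG builds the
// 24 body bytes themselves are filled with 0xCD so that use of uninitialized
// memory shows up as an obvious pattern.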

static void*
RPoolAlloc ( PR_POOL pool, rulong NumberOfBytes, rulong Tag, rulong align )
{
    PR_USED NewBlock;
    PR_FREE BestBlock,
            NextBlock,
            PreviousBlock,
            BestPreviousBlock,
            CurrentBlock;
    void* BestAlignedAddr;
    int que,
        queBytes = NumberOfBytes;
    rulong BlockSize,
           Alignment;
    int que_reclaimed = 0;

    ASSERT ( pool );
    ASSERT ( align < 3 );

    R_ACQUIRE_MUTEX(pool);

    if ( !NumberOfBytes )
    {
        R_DEBUG("0 bytes requested - initiating pool verification\n");
        RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
        R_RELEASE_MUTEX(pool);
        return NULL;
    }
    if ( NumberOfBytes > pool->PoolSize )
    {
        if ( R_IS_POOL_PTR(pool,NumberOfBytes) )
        {
            R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes );
            RUsedRedZoneCheck(pool,RBodyToHdr((void*)(ULONG_PTR)NumberOfBytes), (char*)(ULONG_PTR)NumberOfBytes, __FILE__, __LINE__ );
            R_RELEASE_MUTEX(pool);
            return NULL;
        }
        R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes );
        R_RELEASE_MUTEX(pool);
        return NULL;
    }

    que = RQueWhich ( NumberOfBytes );
    if ( que >= 0 )
    {
        if ( (NewBlock = RQueRemove ( &pool->Que[que][align] )) )
        {
            RiUsedInit ( NewBlock, Tag );
            RiUsedInitRedZone ( NewBlock, NumberOfBytes );
            R_RELEASE_MUTEX(pool);
            return RHdrToBody(NewBlock);
        }
        queBytes = 16 << que;
    }

    /*
     * Calculate the total number of bytes we will need.
     */
    BlockSize = queBytes + sizeof(R_USED) + 2*R_RZ;
    if (BlockSize < sizeof(R_FREE))
    {
        /* At least we need the size of the free block header. */
        BlockSize = sizeof(R_FREE);
    }

try_again:
    /*
     * Find the best-fitting block.
     */
    BestBlock = NULL;
    Alignment = pool->Alignments[align];
    PreviousBlock = NULL;
    BestPreviousBlock = NULL;
    CurrentBlock = pool->FirstFree;
    BestAlignedAddr = NULL;

    while ( CurrentBlock != NULL )
    {
        PVOID Addr = RHdrToBody(CurrentBlock);
        PVOID CurrentBlockEnd = (char*)CurrentBlock + CurrentBlock->Size;
        /* calculate last size-aligned address available within this block */
        PVOID AlignedAddr = R_ROUND_DOWN((char*)CurrentBlockEnd-queBytes-R_RZ, Alignment);
        ASSERT ( (char*)AlignedAddr+queBytes+R_RZ <= (char*)CurrentBlockEnd );

        /* special case, this address is already size-aligned, and the right size */
        if ( Addr == AlignedAddr )
        {
            BestAlignedAddr = AlignedAddr;
            BestPreviousBlock = PreviousBlock;
            BestBlock = CurrentBlock;
            break;
        }
        // if we carve out a size-aligned block... is it still past the end of this
        // block's free header?
        else if ( (char*)RBodyToHdr(AlignedAddr)
                  >= (char*)CurrentBlock+sizeof(R_FREE) )
        {
            /*
             * there's enough room to allocate our size-aligned memory out
             * of this block, see if it's a better choice than any previous
             * finds
             */
            if ( BestBlock == NULL
                 || BestBlock->Size > CurrentBlock->Size )
            {
                BestAlignedAddr = AlignedAddr;
                BestPreviousBlock = PreviousBlock;
                BestBlock = CurrentBlock;
            }
        }

        PreviousBlock = CurrentBlock;
        CurrentBlock = CurrentBlock->NextFree;
    }

    /*
     * We didn't find anything suitable at all.
     */
    if (BestBlock == NULL)
    {
        if ( !que_reclaimed )
        {
            // reclaim que
            int i, j;
            for ( i = 0; i < R_QUECOUNT; i++ )
            {
                for ( j = 0; j < 3; j++ )
                {
                    while ( (BestBlock = (PR_FREE)RQueRemove ( &pool->Que[i][j] )) )
                    {
                        RPoolReclaim ( pool, BestBlock );
                    }
                }
            }

            que_reclaimed = 1;
            goto try_again;
        }
        DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
                queBytes );
        R_RELEASE_MUTEX(pool);
        return NULL;
    }
    /*
     * we found a best block. If Addr isn't already aligned, we've pre-qualified that
     * there's room at the beginning of the block for a free block...
     */
    {
        void* Addr = RHdrToBody(BestBlock);
        if ( BestAlignedAddr != Addr )
        {
            PR_FREE NewFreeBlock = RFreeSplit (
                pool,
                BestBlock,
                (char*)RBodyToHdr(BestAlignedAddr) - (char*)BestBlock );
            ASSERT ( BestAlignedAddr > Addr );

            //DPRINT ( "breaking off preceding bytes into their own block...\n" );
            /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
              NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/

            /* we want the following code to use our size-aligned block */
            BestPreviousBlock = BestBlock;
            BestBlock = NewFreeBlock;

            //VerifyPagedPool();
        }
    }
    /*
     * Is there enough space to create a second block from the unused portion?
     */
    if ( (BestBlock->Size - BlockSize) > sizeof(R_FREE) )
    {
        /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
          BestBlock, BestBlock->Size, BlockSize, NewSize );*/

        /*
         * Create the new free block.
         */
        NextBlock = RFreeSplit ( pool, BestBlock, BlockSize );
        //ASSERT_SIZE ( NextBlock->Size );
    }
    /*
     * Remove the selected block from the list of free blocks.
     */
    //DPRINT ( "Removing selected block from free block list\n" );
    RPoolRemoveFree ( pool, BestBlock );
    /*
     * Create the new used block header.
     */
    NewBlock = (PR_USED)BestBlock;
    RiUsedInit ( NewBlock, Tag );

    /* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/

    RiUsedInitRedZone ( NewBlock, NumberOfBytes );
    R_RELEASE_MUTEX(pool);

    return RHdrToBody(NewBlock);
}
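
/*
 * To summarize the allocation strategy above: a zero-byte request triggers a
 * full red-zone sweep of the pool, and a "size" that is really a pool pointer
 * triggers a red-zone check of that one block (both debugging backdoors);
 * small requests are served from the per-size/per-alignment ques when
 * possible, and everything else falls back to a best-fit walk of the free
 * list, with one retry after draining the ques back into the free list.
 */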

static void
RPoolFree ( PR_POOL pool, void* Addr )
{
    PR_USED UsedBlock;
    rulong UsedSize;
    PR_FREE FreeBlock;
    rulong UserSize;
    int que;

    ASSERT(pool);
    if ( !Addr )
    {
        R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
        R_ACQUIRE_MUTEX(pool);
        RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
        R_RELEASE_MUTEX(pool);
        return;
    }
    R_ASSERT_PTR(pool,Addr);

    UsedBlock = RBodyToHdr(Addr);
    UsedSize = UsedBlock->Size;
    FreeBlock = (PR_FREE)UsedBlock;
#if R_RZ
    UserSize = UsedBlock->UserSize;
#else
    UserSize = UsedSize - sizeof(R_USED) - 2*R_RZ;
#endif//R_RZ

    RUsedRedZoneCheck ( pool, UsedBlock, Addr, __FILE__, __LINE__ );

#if R_RZ
    memset ( Addr, 0xCD, UsedBlock->UserSize );
#endif

    que = RQueWhich ( UserSize );
    if ( que >= 0 )
    {
        int queBytes = 16 << que;
        int align = 0;
        ASSERT( (rulong)queBytes >= UserSize );
        if ( R_ROUND_UP(Addr,pool->Alignments[2]) == Addr )
            align = 2;
        else if ( R_ROUND_UP(Addr,pool->Alignments[1]) == Addr )
            align = 1;
        R_ACQUIRE_MUTEX(pool);
        RQueAdd ( &pool->Que[que][align], UsedBlock );
        R_RELEASE_MUTEX(pool);
        return;
    }

    R_ACQUIRE_MUTEX(pool);
    RPoolReclaim ( pool, FreeBlock );
    R_RELEASE_MUTEX(pool);
}
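
/*
 * Note that a freed block small enough to fit a que is parked there for
 * quick reuse rather than returned to the free list; its memory is only
 * truly reclaimed (and coalesced with its neighbours) when RPoolAlloc
 * drains the ques after a failed best-fit search.
 */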

VOID
INIT_FUNCTION
NTAPI
MmInitializePagedPool(VOID)
{
    /*
     * We are still at a high IRQL level at this point so explicitly commit
     * the first page of the paged pool before writing the first block header.
     */
    MmCommitPagedPoolAddress ( (PVOID)MmPagedPoolBase, FALSE );

    MmPagedPool = RPoolInit ( MmPagedPoolBase,
                              MmPagedPoolSize,
                              MM_POOL_ALIGNMENT,
                              MM_CACHE_LINE_SIZE,
                              PAGE_SIZE );

    ExInitializeFastMutex(&MmPagedPool->Mutex);
}

PVOID NTAPI
ExAllocatePagedPoolWithTag (IN POOL_TYPE PoolType,
                            IN ULONG NumberOfBytes,
                            IN ULONG Tag)
{
    int align;

    if ( NumberOfBytes >= PAGE_SIZE )
        align = 2;
    else if ( PoolType & CACHE_ALIGNED_POOL_MASK )
        align = 1;
    else
        align = 0;

    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);

    return RPoolAlloc ( MmPagedPool, NumberOfBytes, Tag, align );
}

VOID NTAPI
ExFreePagedPool(IN PVOID Block)
{
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);
    RPoolFree ( MmPagedPool, Block );
}
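
/*
 * Usage sketch (illustrative only - the buffer name and tag are made up):
 *
 *   PVOID Buf = ExAllocatePagedPoolWithTag(PagedPool, 100, 'tseT');
 *   // 100 bytes is below PAGE_SIZE and PagedPool has no cache-aligned bit
 *   // set, so this request uses align 0 (MM_POOL_ALIGNMENT) and is served
 *   // from the 128-byte que when an entry is available.
 *   if (Buf) ExFreePagedPool(Buf);
 */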

/* EOF */