/* $Id: ppool.c,v 1.20 2003/10/12 17:05:48 hbirr Exp $
 *
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/ppool.c
 * PURPOSE:         Implements the paged pool
 * PROGRAMMER:      David Welch (welch@mcmail.com)
 * UPDATE HISTORY:
 *                  Created 22/05/98
 */

/* INCLUDES *****************************************************************/

#include <ddk/ntddk.h>
#include <internal/pool.h>
#include <internal/mm.h>

#define NDEBUG
#include <internal/debug.h>

/* GLOBALS *******************************************************************/

/* Enable strict checking of the paged pool on every allocation */
#define ENABLE_VALIDATE_POOL

#undef assert
#define assert(x) if (!(x)) {DbgPrint("Assertion "#x" failed at %s:%d\n", __FILE__,__LINE__); KeBugCheck(0); }
#define ASSERT_SIZE(n) assert ( (n) <= MmPagedPoolSize && (n) >= 0 )
#define ASSERT_PTR(p) assert ( ((size_t)(p)) >= ((size_t)MmPagedPoolBase) && ((size_t)(p)) < ((size_t)(MmPagedPoolBase+MmPagedPoolSize)) )

// to disable buffer over/under-run detection, set the following macro to 0
#define MM_PPOOL_REDZONE_BYTES 4
#define MM_PPOOL_REDZONE_VALUE 0xCD

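/*
 * When MM_PPOOL_REDZONE_BYTES is non-zero, every allocation is bracketed by
 * MM_PPOOL_REDZONE_BYTES bytes of MM_PPOOL_REDZONE_VALUE immediately before
 * and after the caller-visible buffer. ExFreePagedPool() and
 * MmDbgPagedPoolRedZoneCheck() verify these bytes to catch buffer overruns
 * and underruns, and a list of used blocks is kept so that all live
 * allocations can be checked.
 */
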
typedef struct _MM_PPOOL_FREE_BLOCK_HEADER
{
  ULONG Size;
  struct _MM_PPOOL_FREE_BLOCK_HEADER* NextFree;
} MM_PPOOL_FREE_BLOCK_HEADER, *PMM_PPOOL_FREE_BLOCK_HEADER;

typedef struct _MM_PPOOL_USED_BLOCK_HEADER
{
  ULONG Size;
#if MM_PPOOL_REDZONE_BYTES
  ULONG UserSize; // how many bytes the user actually asked for...
  struct _MM_PPOOL_USED_BLOCK_HEADER* NextUsed;
#endif//MM_PPOOL_REDZONE_BYTES
} MM_PPOOL_USED_BLOCK_HEADER, *PMM_PPOOL_USED_BLOCK_HEADER;

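/*
 * In-memory layout of a used block (see block_to_address/address_to_block):
 *
 *   MM_PPOOL_USED_BLOCK_HEADER | redzone | user data | redzone
 *
 * Free blocks start with an MM_PPOOL_FREE_BLOCK_HEADER instead and are kept
 * on a singly-linked list sorted by address.
 */
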
PVOID MmPagedPoolBase;
ULONG MmPagedPoolSize;
static FAST_MUTEX MmPagedPoolLock;
static PMM_PPOOL_FREE_BLOCK_HEADER MmPagedPoolFirstFreeBlock;
#if MM_PPOOL_REDZONE_BYTES
static PMM_PPOOL_USED_BLOCK_HEADER MmPagedPoolFirstUsedBlock;
#endif//MM_PPOOL_REDZONE_BYTES

/* FUNCTIONS *****************************************************************/

inline static void* block_to_address ( PVOID blk )
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
  return ( (void *) ((char*)blk + sizeof(MM_PPOOL_USED_BLOCK_HEADER) + MM_PPOOL_REDZONE_BYTES) );
}

inline static PMM_PPOOL_USED_BLOCK_HEADER address_to_block(PVOID addr)
{
  return (PMM_PPOOL_USED_BLOCK_HEADER)
    ( ((char*)addr) - sizeof(MM_PPOOL_USED_BLOCK_HEADER) - MM_PPOOL_REDZONE_BYTES );
}

VOID INIT_FUNCTION
MmInitializePagedPool(VOID)
{
  MmPagedPoolFirstFreeBlock = (PMM_PPOOL_FREE_BLOCK_HEADER)MmPagedPoolBase;
  /*
   * We are still at a high IRQL level at this point so explicitly commit
   * the first page of the paged pool before writing the first block header.
   */
  MmCommitPagedPoolAddress((PVOID)MmPagedPoolFirstFreeBlock);
  MmPagedPoolFirstFreeBlock->Size = MmPagedPoolSize;
  MmPagedPoolFirstFreeBlock->NextFree = NULL;

#if MM_PPOOL_REDZONE_BYTES
  MmPagedPoolFirstUsedBlock = NULL;
#endif//MM_PPOOL_REDZONE_BYTES

  ExInitializeFastMutex(&MmPagedPoolLock);
}

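/*
 * When ENABLE_VALIDATE_POOL is defined, VerifyPagedPool() walks the whole
 * free list on every allocation and free, asserting that each block lies
 * inside the paged pool and has a sane size.
 */
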
#ifdef ENABLE_VALIDATE_POOL
static void VerifyPagedPool ( int line )
{
  PMM_PPOOL_FREE_BLOCK_HEADER p = MmPagedPoolFirstFreeBlock;
  int count = 0;
  DPRINT ( "VerifyPagedPool(%i):\n", line );
  while ( p )
  {
    DPRINT ( " 0x%x: %lu bytes (next 0x%x)\n", p, p->Size, p->NextFree );
    ASSERT_PTR(p);
    ASSERT_SIZE(p->Size);
    count++;
    p = p->NextFree;
  }
  DPRINT ( "VerifyPagedPool(%i): (%i blocks)\n", line, count );
}
#define VerifyPagedPool() VerifyPagedPool(__LINE__)
#else
#define VerifyPagedPool()
#endif

VOID STDCALL
MmDbgPagedPoolRedZoneCheck ( const char* file, int line )
{
#if MM_PPOOL_REDZONE_BYTES
  PMM_PPOOL_USED_BLOCK_HEADER pUsed = MmPagedPoolFirstUsedBlock;
  int i;
  BOOL bLow = TRUE;
  BOOL bHigh = TRUE;

  while ( pUsed )
  {
    PUCHAR Addr = (PUCHAR)block_to_address(pUsed);
    for ( i = 0; i < MM_PPOOL_REDZONE_BYTES; i++ )
    {
      bLow = bLow && ( *(Addr-i-1) == MM_PPOOL_REDZONE_VALUE );
      bHigh = bHigh && ( *(Addr+pUsed->UserSize+i) == MM_PPOOL_REDZONE_VALUE );
    }
    if ( !bLow || !bHigh )
    {
      const char* violation = "High and Low-side";
      if ( bHigh ) // high side is okay, so it was just the low side that failed
        violation = "Low-side";
      else if ( bLow ) // low side is okay, so it was just the high side that failed
        violation = "High-side";
      DbgPrint("%s(%i): %s redzone violation detected for paged pool address 0x%x\n",
               file, line, violation, Addr );
      KEBUGCHECK(0);
    }
    pUsed = pUsed->NextUsed;
  }
#endif//MM_PPOOL_REDZONE_BYTES
}

/**********************************************************************
 * NAME                                                        INTERNAL
 *      ExAllocatePagedPoolWithTag@12
 *
 * DESCRIPTION
 *      Allocates a zero-initialized block from the paged pool, using a
 *      best-fit search of the free block list.
 *
 * ARGUMENTS
 *      PoolType: PagedPool or PagedPoolCacheAligned (the latter requests
 *              cache-line alignment of the returned block).
 *      NumberOfBytes: Number of bytes the caller needs.
 *      Tag: Pool tag (only used for debug output in this implementation).
 *
 * RETURN VALUE
 *      The address of the allocated block, or NULL if no suitable free
 *      block is available (or NumberOfBytes is zero).
 */
PVOID STDCALL
ExAllocatePagedPoolWithTag (IN POOL_TYPE PoolType,
                            IN ULONG NumberOfBytes,
                            IN ULONG Tag)
{
  PMM_PPOOL_FREE_BLOCK_HEADER BestBlock;
  PMM_PPOOL_FREE_BLOCK_HEADER CurrentBlock;
  ULONG BlockSize;
  PMM_PPOOL_USED_BLOCK_HEADER NewBlock;
  PMM_PPOOL_FREE_BLOCK_HEADER NextBlock;
  PMM_PPOOL_FREE_BLOCK_HEADER PreviousBlock;
  PMM_PPOOL_FREE_BLOCK_HEADER BestPreviousBlock;
  PVOID BlockAddress;
  ULONG Alignment;

  ExAcquireFastMutex(&MmPagedPoolLock);

  /*
   * Don't bother allocating anything for a zero-byte block.
   */
  if (NumberOfBytes == 0)
  {
    MmDbgPagedPoolRedZoneCheck(__FILE__,__LINE__);
    ExReleaseFastMutex(&MmPagedPoolLock);
    return(NULL);
  }

  DPRINT ( "ExAllocatePagedPoolWithTag(%i,%lu,%lu)\n", PoolType, NumberOfBytes, Tag );
  VerifyPagedPool();

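  /*
   * Pick the alignment for the returned address: allocations of a page or
   * more are page-aligned, cache-aligned pool requests get cache-line
   * alignment, and everything else needs no special alignment.
   */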
  if (NumberOfBytes >= PAGE_SIZE)
  {
    Alignment = PAGE_SIZE;
  }
  else if (PoolType == PagedPoolCacheAligned)
  {
    Alignment = MM_CACHE_LINE_SIZE;
  }
  else
  {
    Alignment = 0;
  }

  /*
   * Calculate the total number of bytes we will need: the caller's size
   * plus the used-block header and both redzones.
   */
  BlockSize = NumberOfBytes + sizeof(MM_PPOOL_USED_BLOCK_HEADER) + 2*MM_PPOOL_REDZONE_BYTES;
  if (BlockSize < sizeof(MM_PPOOL_FREE_BLOCK_HEADER))
  {
    /* We need at least the size of the free block header, so the block can
     * be put back on the free list when it is released. */
    BlockSize = sizeof(MM_PPOOL_FREE_BLOCK_HEADER);
  }

  /*
   * Find the best-fitting block.
   */
  PreviousBlock = NULL;
  BestPreviousBlock = BestBlock = NULL;
  CurrentBlock = MmPagedPoolFirstFreeBlock;
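  /*
   * When an alignment is required, walk the free list looking for the
   * smallest block that can supply a suitably aligned address with room for
   * the header, redzones and user data. If the aligned address is not at the
   * start of the chosen block, the leading bytes are split off into their
   * own free block just below.
   */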
  if ( Alignment > 0 )
  {
    PVOID BestAlignedAddr = NULL;
    while ( CurrentBlock != NULL )
    {
      PVOID Addr = block_to_address(CurrentBlock);
      PVOID CurrentBlockEnd = (PVOID)CurrentBlock + CurrentBlock->Size;
      /* calculate the last size-aligned address available within this block */
      PVOID AlignedAddr = MM_ROUND_DOWN(CurrentBlockEnd-NumberOfBytes-MM_PPOOL_REDZONE_BYTES, Alignment);
      assert ( AlignedAddr+NumberOfBytes+MM_PPOOL_REDZONE_BYTES <= CurrentBlockEnd );

      /* special case: this address is already size-aligned and the right size */
      if ( Addr == AlignedAddr )
      {
        BestAlignedAddr = AlignedAddr;
        BestPreviousBlock = PreviousBlock;
        BestBlock = CurrentBlock;
        break;
      }
      else if ( Addr < (PVOID)address_to_block(AlignedAddr) )
      {
        /*
         * There's enough room to allocate our size-aligned memory out
         * of this block; see if it's a better choice than any previous
         * find.
         */
        if ( BestBlock == NULL || BestBlock->Size > CurrentBlock->Size )
        {
          BestAlignedAddr = AlignedAddr;
          BestPreviousBlock = PreviousBlock;
          BestBlock = CurrentBlock;
        }
      }

      PreviousBlock = CurrentBlock;
      CurrentBlock = CurrentBlock->NextFree;
    }

    /*
     * We found a best block. Can/should we chop a few bytes off the
     * beginning into a separate memory block?
     */
    if ( BestBlock != NULL )
    {
      PVOID Addr = block_to_address(BestBlock);
      if ( BestAlignedAddr != Addr )
      {
        PMM_PPOOL_FREE_BLOCK_HEADER NewFreeBlock =
          (PMM_PPOOL_FREE_BLOCK_HEADER)address_to_block(BestAlignedAddr);
        assert ( BestAlignedAddr > Addr );
        NewFreeBlock->Size = Addr + BestBlock->Size - BestAlignedAddr;
        ASSERT_SIZE(NewFreeBlock->Size);
        BestBlock->Size = (size_t)NewFreeBlock - (size_t)Addr;
        ASSERT_SIZE(BestBlock->Size);

        DPRINT ( "breaking off preceding bytes into their own block...\n" );
        DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
                 NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );

        /* insert the new block into the chain */
        NewFreeBlock->NextFree = BestBlock->NextFree;
        BestBlock->NextFree = NewFreeBlock;

        /* we want the following code to use our size-aligned block */
        BestPreviousBlock = BestBlock;
        BestBlock = NewFreeBlock;

        //VerifyPagedPool();
      }
    }
  }
  /*
   * Non-size-aligned block search.
   */
  else while ( CurrentBlock != NULL )
  {
    if ( CurrentBlock->Size >= BlockSize
      && ( BestBlock == NULL || BestBlock->Size > CurrentBlock->Size )
      )
    {
      BestPreviousBlock = PreviousBlock;
      BestBlock = CurrentBlock;
    }

    PreviousBlock = CurrentBlock;
    CurrentBlock = CurrentBlock->NextFree;
  }

  /*
   * We didn't find anything suitable at all.
   */
  if (BestBlock == NULL)
  {
    DPRINT("ExAllocatePagedPoolWithTag() - nothing suitable found, returning NULL\n" );
    ExReleaseFastMutex(&MmPagedPoolLock);
    return(NULL);
  }

  DPRINT("BestBlock 0x%x NextFree 0x%x\n", BestBlock, BestBlock->NextFree );

  //VerifyPagedPool();

  /*
   * Is there enough space to create a second block from the unused portion?
   */
  if ( BestBlock->Size > BlockSize
    && (BestBlock->Size - BlockSize) > sizeof(MM_PPOOL_FREE_BLOCK_HEADER)
    )
  {
    ULONG NewSize = BestBlock->Size - BlockSize;
    ASSERT_SIZE ( NewSize );

    //DPRINT("creating 2nd block from unused portion\n");
    DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
           BestBlock, BestBlock->Size, BlockSize, NewSize );

    /*
     * Create the new free block.
     */
    //DPRINT("creating the new free block");
    NextBlock = (PMM_PPOOL_FREE_BLOCK_HEADER)((char*)BestBlock + BlockSize);
    //DPRINT(".");
    NextBlock->Size = NewSize;
    ASSERT_SIZE ( NextBlock->Size );
    //DPRINT(".");
    NextBlock->NextFree = BestBlock->NextFree;
    //DPRINT(".\n");

    /*
     * Replace the old free block with it.
     */
    //DPRINT("replacing old free block with it");
    if (BestPreviousBlock == NULL)
    {
      //DPRINT("(from beginning)");
      MmPagedPoolFirstFreeBlock = NextBlock;
    }
    else
    {
      //DPRINT("(from previous)");
      BestPreviousBlock->NextFree = NextBlock;
    }
    //DPRINT(".\n");

    /*
     * Create the new used block header.
     */
    //DPRINT("create new used block header");
    NewBlock = (PMM_PPOOL_USED_BLOCK_HEADER)BestBlock;
    //DPRINT(".");
    NewBlock->Size = BlockSize;
    ASSERT_SIZE ( NewBlock->Size );
    //DPRINT(".\n");
  }
  else
  {
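    /*
     * The unused portion is too small to hold its own free block header, so
     * hand the caller the whole block (a little internal fragmentation)
     * rather than splitting it.
     */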
    ULONG NewSize = BestBlock->Size;

    /*
     * Remove the selected block from the list of free blocks.
     */
    //DPRINT ( "Removing selected block from free block list\n" );
    if (BestPreviousBlock == NULL)
    {
      MmPagedPoolFirstFreeBlock = BestBlock->NextFree;
    }
    else
    {
      BestPreviousBlock->NextFree = BestBlock->NextFree;
    }

    /*
     * Set up the header of the new block.
     */
    NewBlock = (PMM_PPOOL_USED_BLOCK_HEADER)BestBlock;
    NewBlock->Size = NewSize;
    ASSERT_SIZE ( NewBlock->Size );
  }

#if MM_PPOOL_REDZONE_BYTES
  // now add the block to the used block list
  NewBlock->NextUsed = MmPagedPoolFirstUsedBlock;
  MmPagedPoolFirstUsedBlock = NewBlock;
#endif//MM_PPOOL_REDZONE_BYTES

  VerifyPagedPool();

  ExReleaseFastMutex(&MmPagedPoolLock);

  BlockAddress = block_to_address ( NewBlock );

  memset(BlockAddress, 0, NumberOfBytes);

#if MM_PPOOL_REDZONE_BYTES
  NewBlock->UserSize = NumberOfBytes;
  // write out buffer-overrun detection bytes
  {
    PUCHAR Addr = (PUCHAR)BlockAddress;
    //DbgPrint ( "writing buffer-overrun detection bytes" );
    memset ( Addr - MM_PPOOL_REDZONE_BYTES,
             MM_PPOOL_REDZONE_VALUE, MM_PPOOL_REDZONE_BYTES );
    memset ( Addr + NewBlock->UserSize, MM_PPOOL_REDZONE_VALUE,
             MM_PPOOL_REDZONE_BYTES );
    /*for ( i = 0; i < MM_PPOOL_REDZONE_BYTES; i++ )
    {
      //DbgPrint(".");
      *(Addr-i-1) = 0xCD;
      //DbgPrint("o");
      *(Addr+NewBlock->UserSize+i) = 0xCD;
    }*/
    //DbgPrint ( "done!\n" );
  }

#endif//MM_PPOOL_REDZONE_BYTES

  return(BlockAddress);
}

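/*
 * Minimal usage sketch (illustration only; the tag value below is arbitrary
 * since this implementation only prints it). These internal helpers are
 * normally reached through the generic pool allocator rather than being
 * called directly:
 *
 *   PVOID Buffer = ExAllocatePagedPoolWithTag(PagedPool, 128, 0);
 *   if (Buffer != NULL)
 *   {
 *     // ... use the 128 zero-initialized bytes ...
 *     ExFreePagedPool(Buffer);
 *   }
 */
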
VOID STDCALL
ExFreePagedPool(IN PVOID Block)
{
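  /*
   * To free a block: check its redzone bytes and unlink it from the
   * used-block list (when redzone checking is enabled), turn it back into a
   * free block, insert it into the address-ordered free list, and merge it
   * with any adjacent free neighbours.
   */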
  PMM_PPOOL_FREE_BLOCK_HEADER PreviousBlock;
  PMM_PPOOL_USED_BLOCK_HEADER UsedBlock = address_to_block(Block);
  ULONG UsedSize = UsedBlock->Size;
  PMM_PPOOL_FREE_BLOCK_HEADER FreeBlock =
    (PMM_PPOOL_FREE_BLOCK_HEADER)UsedBlock;
  PMM_PPOOL_FREE_BLOCK_HEADER NextBlock;
  PMM_PPOOL_FREE_BLOCK_HEADER NextNextBlock;

#if MM_PPOOL_REDZONE_BYTES
  // check the buffer-overrun detection bytes
  {
    int i;
    PUCHAR Addr = (PUCHAR)Block;
    //DbgPrint ( "checking buffer-overrun detection bytes..." );
    for ( i = 0; i < MM_PPOOL_REDZONE_BYTES; i++ )
    {
      assert ( *(Addr-i-1) == MM_PPOOL_REDZONE_VALUE );
      assert ( *(Addr+UsedBlock->UserSize+i) == MM_PPOOL_REDZONE_VALUE );
    }
    //DbgPrint ( "done!\n" );
  }
#endif//MM_PPOOL_REDZONE_BYTES

  ExAcquireFastMutex(&MmPagedPoolLock);

#if MM_PPOOL_REDZONE_BYTES
  // remove the block from the used block list...
  {
    PMM_PPOOL_USED_BLOCK_HEADER pPrev = MmPagedPoolFirstUsedBlock;
    if ( pPrev == UsedBlock )
    {
      // special case: the block being freed is first in the list...
      MmPagedPoolFirstUsedBlock = pPrev->NextUsed;
    }
    else
    {
      while ( pPrev && pPrev->NextUsed != UsedBlock )
        pPrev = pPrev->NextUsed;
      // if this assert fails, memory has been corrupted
      // ( or I have a logic error...! )
      assert ( pPrev->NextUsed == UsedBlock );
      pPrev->NextUsed = UsedBlock->NextUsed;
    }
  }
#endif//MM_PPOOL_REDZONE_BYTES

  /*
   * Begin setting up the newly freed block's header.
   */
  FreeBlock->Size = UsedSize;
  ASSERT_SIZE ( FreeBlock->Size );

  /*
   * Find the blocks immediately before and after the newly freed block
   * on the free list.
   */
  PreviousBlock = NULL;
  NextBlock = MmPagedPoolFirstFreeBlock;
  while (NextBlock != NULL && NextBlock < FreeBlock)
  {
    PreviousBlock = NextBlock;
    NextBlock = NextBlock->NextFree;
  }

  /*
   * Insert the freed block on the free list.
   */
  if (PreviousBlock == NULL)
  {
    FreeBlock->NextFree = MmPagedPoolFirstFreeBlock;
    MmPagedPoolFirstFreeBlock = FreeBlock;
  }
  else
  {
    PreviousBlock->NextFree = FreeBlock;
    FreeBlock->NextFree = NextBlock;
  }

  /*
   * If the next block is immediately adjacent to the newly freed one then
   * merge them.
   */
  if (NextBlock != NULL &&
      ((char*)FreeBlock + FreeBlock->Size) == (char*)NextBlock)
  {
    FreeBlock->Size = FreeBlock->Size + NextBlock->Size;
    ASSERT_SIZE ( FreeBlock->Size );
    FreeBlock->NextFree = NextBlock->NextFree;
    /* Remember the block that now follows the merged region so the
     * previous-block merge below can link to it. */
    NextNextBlock = NextBlock->NextFree;
  }
  else
  {
    NextNextBlock = NextBlock;
  }

  /*
   * If the previous block is adjacent to the newly freed one then
   * merge them.
   */
  if (PreviousBlock != NULL &&
      ((char*)PreviousBlock + PreviousBlock->Size) == (char*)FreeBlock)
  {
    PreviousBlock->Size = PreviousBlock->Size + FreeBlock->Size;
    ASSERT_SIZE ( PreviousBlock->Size );
    PreviousBlock->NextFree = NextNextBlock;
  }

  VerifyPagedPool();

  ExReleaseFastMutex(&MmPagedPoolLock);
}

/* EOF */