typedef struct tagARENA_INUSE
{
- DWORD size; /* Block size; must be the first field */
- DWORD magic : 24; /* Magic number */
+ SIZE_T size; /* Block size; must be the first field */
+ DWORD magic : 23; /* Magic number */
+ DWORD has_user_data : 1; /* There is user data associated with this block */
DWORD unused_bytes : 8; /* Number of bytes in the block not used by user data (max value is HEAP_MIN_DATA_SIZE+HEAP_MIN_SHRINK_SIZE) */
} ARENA_INUSE;
typedef struct tagARENA_FREE
{
- DWORD size; /* Block size; must be the first field */
+ SIZE_T size; /* Block size; must be the first field */
DWORD magic; /* Magic number */
struct list entry; /* Entry in free list */
} ARENA_FREE;
#define ARENA_FLAG_FREE 0x00000001 /* flags OR'ed with arena size */
#define ARENA_FLAG_PREV_FREE 0x00000002
-#define ARENA_SIZE_MASK (~3)
#define ARENA_INUSE_MAGIC 0x455355 /* Value for arena 'magic' field */
#define ARENA_FREE_MAGIC 0x45455246 /* Value for arena 'magic' field */
+#ifndef _WIN64
+#define ARENA_SIZE_MASK (~3L)
+#else
+#define ARENA_SIZE_MASK (~7L)
+#endif
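+/* The two (three on Win64) low bits of an arena size double as storage for
+   the ARENA_FLAG_* bits above, which is presumably why the 64-bit mask
+   reserves an extra bit even though only two flags are defined so far. */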
+
#define ARENA_INUSE_FILLER 0x55
#define ARENA_FREE_FILLER 0xaa
-#define ALIGNMENT 8 /* everything is aligned on 8 byte boundaries */
-#define ROUND_SIZE(size) (((size) + ALIGNMENT - 1) & ~(ALIGNMENT-1))
+/* everything is aligned on 8 byte boundaries (16 for Win64)*/
+#define ALIGNMENT (2*sizeof(void*))
+#define ARENA_OFFSET (ALIGNMENT - sizeof(ARENA_INUSE))
+
+#define ROUND_SIZE(size) ((((size) + ALIGNMENT - 1) & ~(ALIGNMENT-1)) + ARENA_OFFSET)
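+/* ROUND_SIZE() now measures from the arena header so that the user pointer
+   (arena + 1) stays aligned: arenas are expected at an address equal to
+   ARENA_OFFSET modulo ALIGNMENT (see the arena validation checks below),
+   which puts the data that follows the header on an ALIGNMENT boundary.
+   E.g. on a 32-bit build, assuming sizeof(ARENA_INUSE) == 8: ALIGNMENT == 8,
+   ARENA_OFFSET == 0 and ROUND_SIZE(13) == 16. */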
+
#define QUIET 1 /* Suppress messages */
#define NOISY 0 /* Report all errors */
/* minimum data size (without arenas) of an allocated block */
-#define HEAP_MIN_DATA_SIZE 16
+#define HEAP_MIN_DATA_SIZE ROUND_SIZE(2 * sizeof(struct list))
/* minimum size that must remain to shrink an allocated block */
#define HEAP_MIN_SHRINK_SIZE (HEAP_MIN_DATA_SIZE+sizeof(ARENA_FREE))
-#define HEAP_NB_FREE_LISTS 4 /* Number of free lists */
+#define HEAP_NB_FREE_LISTS 5 /* Number of free lists */
/* Max size of the blocks on the free lists */
static const DWORD HEAP_freeListSizes[HEAP_NB_FREE_LISTS] =
{
- 0x20, 0x80, 0x200, ~0UL
+ 0x10, 0x20, 0x80, 0x200, MAXULONG
};
-typedef struct
+typedef union
{
ARENA_FREE arena;
+ void *alignment[4];
} FREE_LIST_ENTRY;
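+/* The alignment member presumably pads each free-list header so that
+   sizeof(FREE_LIST_ENTRY) is a multiple of ALIGNMENT, keeping every embedded
+   ARENA_FREE in the freeList array at the expected arena alignment (see the
+   assert on processHeap->freeList below). */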
struct tagHEAP;
typedef struct tagSUBHEAP
{
- DWORD size; /* Size of the whole sub-heap */
- DWORD commitSize; /* Committed size of the sub-heap */
+ SIZE_T size; /* Size of the whole sub-heap */
+ SIZE_T commitSize; /* Committed size of the sub-heap */
DWORD headerSize; /* Size of the heap header */
struct tagSUBHEAP *next; /* Next sub-heap */
struct tagHEAP *heap; /* Main heap structure */
DWORD magic; /* Magic number */
- ULONG UserFlags;
- PVOID UserValue;
} SUBHEAP;
#define SUBHEAP_MAGIC ((DWORD)('S' | ('U'<<8) | ('B'<<16) | ('H'<<24)))
+typedef struct tagHEAP_USER_DATA
+{
+ LIST_ENTRY ListEntry;
+ PVOID BaseAddress;
+ ULONG UserFlags;
+ PVOID UserValue;
+} HEAP_USER_DATA, *PHEAP_USER_DATA;
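+/* This list, anchored in HEAP.UserDataHead, backs RtlSetUserValueHeap(),
+   RtlSetUserFlagsHeap() and RtlGetUserInfoHeap().  Entries are created
+   lazily per allocation (HEAP_AllocUserData), looked up by the block's user
+   pointer (HEAP_GetUserData) and released when the block is freed; the arena
+   itself only carries the has_user_data bit. */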
+
typedef struct tagHEAP
{
SUBHEAP subheap; /* First sub-heap */
struct list entry; /* Entry in process heap list */
- RTL_CRITICAL_SECTION critSection; /* Critical section for serialization */
- FREE_LIST_ENTRY freeList[HEAP_NB_FREE_LISTS]; /* Free lists */
+ HEAP_LOCK lock; /* Critical section for serialization */
+ DECLSPEC_ALIGN(8) FREE_LIST_ENTRY freeList[HEAP_NB_FREE_LISTS]; /* Free lists */
DWORD flags; /* Heap flags */
DWORD magic; /* Magic number */
PRTL_HEAP_COMMIT_ROUTINE commitRoutine;
+ LIST_ENTRY UserDataHead;
} HEAP;
#define HEAP_MAGIC ((DWORD)('H' | ('E'<<8) | ('A'<<16) | ('P'<<24)))
return i;
}
+/* get the memory protection type to use for a given heap */
+static inline ULONG get_protection_type( DWORD flags )
+{
+ return (flags & HEAP_CREATE_ENABLE_EXECUTE) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+}
+
static RTL_CRITICAL_SECTION_DEBUG process_heap_critsect_debug =
{
0, 0, NULL, /* will be set later */
{ &process_heap_critsect_debug.ProcessLocksList, &process_heap_critsect_debug.ProcessLocksList },
- 0, 0, { (DWORD_PTR)(__FILE__ ": main process heap section") }
+ 0, 0, 0, 0, 0
};
/***********************************************************************
}
}
DPRINTF( "\nTotal: Size=%08lx Committed=%08lx Free=%08lx Used=%08lx Arenas=%08lx (%ld%%)\n\n",
- subheap->size, subheap->commitSize, freeSize, usedSize,
- arenaSize, (arenaSize * 100) / subheap->size );
+ subheap->size, subheap->commitSize, freeSize, usedSize,
+ arenaSize, (arenaSize * 100) / subheap->size );
subheap = subheap->next;
}
}
}
#endif
+static PHEAP_USER_DATA HEAP_GetUserData(HEAP *heapPtr, PVOID BaseAddress)
+{
+ PLIST_ENTRY CurrentEntry;
+ PHEAP_USER_DATA udata;
+
+ CurrentEntry = heapPtr->UserDataHead.Flink;
+ while (CurrentEntry != &heapPtr->UserDataHead)
+ {
+ udata = CONTAINING_RECORD(CurrentEntry, HEAP_USER_DATA, ListEntry);
+ if (udata->BaseAddress == BaseAddress)
+ return udata;
+ CurrentEntry = CurrentEntry->Flink;
+ }
+ return NULL;
+}
+
+static PHEAP_USER_DATA HEAP_AllocUserData(HEAP *heapPtr, PVOID BaseAddress)
+{
+ /* Allocate user data entry */
+ ARENA_INUSE *pInUse;
+ PHEAP_USER_DATA udata = RtlAllocateHeap(heapPtr, 0, sizeof(HEAP_USER_DATA));
+ if (!udata) return NULL;
+ udata->BaseAddress = BaseAddress;
+ InsertTailList(&heapPtr->UserDataHead, &udata->ListEntry);
+ pInUse = (ARENA_INUSE *)BaseAddress - 1;
+ pInUse->has_user_data = 1;
+ return udata;
+}
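+/* Note: HEAP_AllocUserData() expects BaseAddress to be the user pointer of a
+   live in-use block; the arena header is found at
+   ((ARENA_INUSE *)BaseAddress - 1) and tagged with has_user_data so that
+   HEAP_MakeInUseBlockFree() knows to drop the list entry. */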
+
/***********************************************************************
* HEAP_GetPtr
* RETURNS
HEAP *heapPtr = (HEAP *)heap;
if (!heapPtr || (heapPtr->magic != HEAP_MAGIC))
{
- ERR("Invalid heap %p!\n", heap );
+ if (heapPtr)
+ ERR("Invalid heap %p, magic:%.4s!\n", heap, &heapPtr->magic );
+ else
+ ERR("Invalid heap %p!\n", heap );
+ //KeDumpStackFrames(NULL);
return NULL;
}
if (TRACE_ON(heap) && !HEAP_IsRealArena( heapPtr, 0, NULL, NOISY ))
else
{
Status = ZwAllocateVirtualMemory( NtCurrentProcess(), &ptr, 0,
- &size, MEM_COMMIT, PAGE_READWRITE );
+ &size, MEM_COMMIT, get_protection_type(subheap->heap->flags) );
}
if (!NT_SUCCESS(Status))
{
{
ARENA_FREE *pFree;
SIZE_T size = (pArena->size & ARENA_SIZE_MASK) + sizeof(*pArena);
+ PHEAP_USER_DATA udata;
+
+ /* Find and free user data */
+ if (pArena->has_user_data)
+ {
+ udata = HEAP_GetUserData(subheap->heap, pArena + 1);
+ if (udata)
+ {
+ RemoveEntryList(&udata->ListEntry);
+ RtlFreeHeap(subheap->heap, 0, udata);
+ }
+ }
/* Check if we can merge with previous block */
int i;
NTSTATUS Status;
-#if 0
- if (ZwAllocateVirtualMemory( NtCurrentProcess(), &address, 0,
- &commitSize, MEM_COMMIT, PAGE_READWRITE ))
+ if (!address && ZwAllocateVirtualMemory( NtCurrentProcess(), &address, 0,
+ &commitSize, MEM_COMMIT, get_protection_type(flags) ))
{
WARN("Could not commit %08lx bytes for sub-heap %p\n", commitSize, address );
return FALSE;
}
-#endif
/* Fill the sub-heap structure */
heap->commitRoutine = Parameters->CommitRoutine;
else
heap->commitRoutine = NULL;
+ InitializeListHead(&heap->UserDataHead);
/* Build the free lists */
{
if (!processHeap) /* do it by hand to avoid memory allocations */
{
- heap->critSection.DebugInfo = &process_heap_critsect_debug;
- heap->critSection.LockCount = -1;
- heap->critSection.RecursionCount = 0;
- heap->critSection.OwningThread = 0;
- heap->critSection.LockSemaphore = 0;
- heap->critSection.SpinCount = 0;
- process_heap_critsect_debug.CriticalSection = &heap->critSection;
+ heap->lock.CriticalSection.DebugInfo = &process_heap_critsect_debug;
+ heap->lock.CriticalSection.LockCount = -1;
+ heap->lock.CriticalSection.RecursionCount = 0;
+ heap->lock.CriticalSection.OwningThread = 0;
+ heap->lock.CriticalSection.LockSemaphore = 0;
+ heap->lock.CriticalSection.SpinCount = 0;
+ process_heap_critsect_debug.CriticalSection = &heap->lock.CriticalSection;
}
- else RtlInitializeHeapLock( &heap->critSection );
+ else RtlInitializeHeapLock( &heap->lock );
}
}
0,
&commitSize,
MEM_COMMIT,
- PAGE_EXECUTE_READWRITE);
+ get_protection_type(flags));
}
if (!NT_SUCCESS(Status))
{
totalSize = (totalSize + 0xffff) & 0xffff0000;
commitSize = (commitSize + 0xffff) & 0xffff0000;
if (!commitSize) commitSize = 0x10000;
+ totalSize = min( totalSize, 0xffff0000 ); /* don't allow a heap larger than 4Gb */
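+ /* e.g. a requested totalSize of 0x00012345 becomes 0x00020000 after the
+    64KB round-up above; 0xffff0000 is the largest 64KB-aligned value that
+    still fits in 32 bits, hence the cap. */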
if (totalSize < commitSize) totalSize = commitSize;
if (!address)
{
/* allocate the memory block */
if (ZwAllocateVirtualMemory( NtCurrentProcess(), &address, 0, &totalSize,
- MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE ))
+ MEM_RESERVE | MEM_COMMIT, get_protection_type(flags) ))
{
WARN("Could not allocate %08lx bytes\n", totalSize );
return NULL;
{
SUBHEAP *subheap;
struct list *ptr;
+ SIZE_T total_size;
FREE_LIST_ENTRY *pEntry = heap->freeList + get_freelist_index( size + sizeof(ARENA_INUSE) );
/* Find a suitable free list, and in it find a block large enough */
if (!(heap->flags & HEAP_GROWABLE))
{
- WARN("Not enough space in heap %p for %08lx bytes\n", heap, size );
+ ERR("Not enough space in heap %p for %08lx bytes\n", heap, size );
return NULL;
}
/* make sure that we have a big enough size *committed* to fit another
* So just one heap struct, one first free arena which will eventually
* get used, and a second free arena that might get assigned all remaining
* free space in HEAP_ShrinkBlock() */
- size += ROUND_SIZE(sizeof(SUBHEAP)) + sizeof(ARENA_INUSE) + sizeof(ARENA_FREE);
- if (!(subheap = HEAP_CreateSubHeap( heap, NULL, heap->flags, size,
- max( HEAP_DEF_SIZE, size ), NULL )))
+ total_size = size + ROUND_SIZE(sizeof(SUBHEAP)) + sizeof(ARENA_INUSE) + sizeof(ARENA_FREE);
+ if (total_size < size) return NULL; /* overflow */
+
+ if (!(subheap = HEAP_CreateSubHeap( heap, NULL, heap->flags, total_size,
+ max( HEAP_DEF_SIZE, total_size ), NULL )))
return NULL;
TRACE("created new sub-heap %p of %08lx bytes for heap %p\n",
char *heapEnd = (char *)subheap + subheap->size;
/* Check for unaligned pointers */
- if ( (ULONG_PTR)pArena % ALIGNMENT != 0 )
+ if ( (ULONG_PTR)pArena % ALIGNMENT != ARENA_OFFSET )
{
ERR("Heap %p: unaligned arena pointer %p\n", subheap->heap, pArena );
return FALSE;
const char *heapEnd = (const char *)subheap + subheap->size;
/* Check for unaligned pointers */
- if ( (ULONG_PTR)pArena % ALIGNMENT != 0 )
+ if ( (ULONG_PTR)pArena % ALIGNMENT != ARENA_OFFSET )
{
if ( quiet == NOISY )
{
flags |= heapPtr->flags;
/* calling HeapLock may result in infinite recursion, so do the critsect directly */
if (!(flags & HEAP_NO_SERIALIZE))
- RtlEnterHeapLock( &heapPtr->critSection );
+ RtlEnterHeapLock( &heapPtr->lock );
if (block)
{
ret = HEAP_ValidateInUseArena( subheap, (const ARENA_INUSE *)block - 1, quiet );
if (!(flags & HEAP_NO_SERIALIZE))
- RtlLeaveHeapLock( &heapPtr->critSection );
+ RtlLeaveHeapLock( &heapPtr->lock );
return ret;
}
subheap = subheap->next;
}
- if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->critSection );
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock );
return ret;
}
if (processHeap)
{
HEAP *heapPtr = subheap->heap;
- RtlEnterHeapLock( &processHeap->critSection );
+ RtlEnterHeapLock( &processHeap->lock );
list_add_head( &processHeap->entry, &heapPtr->entry );
- RtlLeaveHeapLock( &processHeap->critSection );
+ RtlLeaveHeapLock( &processHeap->lock );
}
else
{
processHeap = subheap->heap; /* assume the first heap we create is the process main heap */
list_init( &processHeap->entry );
+ assert( (ULONG_PTR)processHeap->freeList % ALIGNMENT == ARENA_OFFSET );
}
}
return heap; /* cannot delete the main process heap */
/* remove it from the per-process list */
- RtlEnterHeapLock( &processHeap->critSection );
+ RtlEnterHeapLock( &processHeap->lock );
list_remove( &heapPtr->entry );
- RtlLeaveHeapLock( &processHeap->critSection );
+ RtlLeaveHeapLock( &processHeap->lock );
}
- RtlDeleteHeapLock( &heapPtr->critSection );
+ RtlDeleteHeapLock( &heapPtr->lock );
subheap = &heapPtr->subheap;
while (subheap)
{
PVOID NTAPI
RtlAllocateHeap(HANDLE heap, /* [in] Handle of private heap block */
ULONG flags, /* [in] Heap allocation control flags */
- ULONG size) /* [in] Number of bytes to allocate */
+ SIZE_T size) /* [in] Number of bytes to allocate */
{
ARENA_FREE *pArena;
ARENA_INUSE *pInUse;
//flags &= HEAP_GENERATE_EXCEPTIONS | HEAP_NO_SERIALIZE | HEAP_ZERO_MEMORY;
flags |= heapPtr->flags;
rounded_size = ROUND_SIZE(size);
+ if (rounded_size < size) /* overflow */
+ {
+ if (flags & HEAP_GENERATE_EXCEPTIONS) RtlRaiseStatus( STATUS_NO_MEMORY );
+ return NULL;
+ }
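+ /* ROUND_SIZE() adds ALIGNMENT-1 (plus ARENA_OFFSET), so a size close to the
+    SIZE_T maximum wraps during the rounding and comes back smaller than the
+    request - that is what the check above catches. */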
+
if (rounded_size < HEAP_MIN_DATA_SIZE) rounded_size = HEAP_MIN_DATA_SIZE;
- if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->critSection );
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->lock );
/* Locate a suitable free block */
-
- /* Locate a suitable free block */
-
if (!(pArena = HEAP_FindFreeBlock( heapPtr, rounded_size, &subheap )))
{
TRACE("(%p,%08lx,%08lx): returning NULL\n",
heap, flags, size );
- if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->critSection );
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock );
if (flags & HEAP_GENERATE_EXCEPTIONS) RtlRaiseStatus( STATUS_NO_MEMORY );
return NULL;
}
* so we have to add the difference to the size */
pInUse->size = (pInUse->size & ~ARENA_FLAG_FREE) + sizeof(ARENA_FREE) - sizeof(ARENA_INUSE);
pInUse->magic = ARENA_INUSE_MAGIC;
-
- /* Save user flags */
- subheap->UserFlags = flags & HEAP_SETTABLE_USER_FLAGS;
+ pInUse->has_user_data = 0;
/* Shrink the block */
pInUse->unused_bytes = (pInUse->size & ARENA_SIZE_MASK) - size;
if (flags & HEAP_ZERO_MEMORY)
- clear_block( pInUse + 1, pInUse->size & ARENA_SIZE_MASK );
+ {
+ clear_block( pInUse + 1, size );
+ mark_block_uninitialized( (char *)(pInUse + 1) + size, pInUse->unused_bytes );
+ }
else
mark_block_uninitialized( pInUse + 1, pInUse->size & ARENA_SIZE_MASK );
- if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->critSection );
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock );
TRACE("(%p,%08lx,%08lx): returning %p\n", heap, flags, size, pInUse + 1 );
return (LPVOID)(pInUse + 1);
flags &= HEAP_NO_SERIALIZE;
flags |= heapPtr->flags;
- if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->critSection );
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->lock );
if (!HEAP_IsRealArena( heapPtr, HEAP_NO_SERIALIZE, ptr, QUIET ))
{
- if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->critSection );
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock );
RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER );
TRACE("(%p,%08lx,%p): returning FALSE\n", heap, flags, ptr );
return FALSE;
}
- /* Turn the block into a free block */
-
+ /* Some sanity checks */
pInUse = (ARENA_INUSE *)ptr - 1;
subheap = HEAP_FindSubHeap( heapPtr, pInUse );
+ if ((char *)pInUse < (char *)subheap + subheap->headerSize) goto error;
+ if (!HEAP_ValidateInUseArena( subheap, pInUse, QUIET )) goto error;
+
+ /* Turn the block into a free block */
+
HEAP_MakeInUseBlockFree( subheap, pInUse );
- if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->critSection );
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock );
TRACE("(%p,%08lx,%p): returning TRUE\n", heap, flags, ptr );
return TRUE;
+
+error:
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock );
+ RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER );
+ TRACE("(%p,%08x,%p): returning FALSE\n", heap, flags, ptr );
+ return FALSE;
}
ARENA_INUSE *pArena;
HEAP *heapPtr;
SUBHEAP *subheap;
- SIZE_T oldSize, rounded_size;
+ SIZE_T oldBlockSize, oldActualSize, rounded_size;
if (!ptr) return NULL;
if (!(heapPtr = HEAP_GetPtr( heap )))
//Flags &= HEAP_GENERATE_EXCEPTIONS | HEAP_NO_SERIALIZE | HEAP_ZERO_MEMORY |
// HEAP_REALLOC_IN_PLACE_ONLY;
flags |= heapPtr->flags;
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->lock );
+
rounded_size = ROUND_SIZE(size);
+ if (rounded_size < size) goto oom; /* overflow */
if (rounded_size < HEAP_MIN_DATA_SIZE) rounded_size = HEAP_MIN_DATA_SIZE;
- if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->critSection );
- if (!HEAP_IsRealArena( heapPtr, HEAP_NO_SERIALIZE, ptr, QUIET ))
- {
- if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->critSection );
- RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER );
- TRACE("(%p,%08lx,%p,%08lx): returning NULL\n", heap, flags, ptr, size );
- return NULL;
- }
-
pArena = (ARENA_INUSE *)ptr - 1;
- subheap = HEAP_FindSubHeap( heapPtr, pArena );
- oldSize = (pArena->size & ARENA_SIZE_MASK);
- if (rounded_size > oldSize)
+ if (!(subheap = HEAP_FindSubHeap( heapPtr, pArena ))) goto error;
+ if ((char *)pArena < (char *)subheap + subheap->headerSize) goto error;
+ if (!HEAP_ValidateInUseArena( subheap, pArena, QUIET )) goto error;
+
+ oldBlockSize = (pArena->size & ARENA_SIZE_MASK);
+ oldActualSize = (pArena->size & ARENA_SIZE_MASK) - pArena->unused_bytes;
+
+ if (rounded_size > oldBlockSize)
{
- char *pNext = (char *)(pArena + 1) + oldSize;
+ char *pNext = (char *)(pArena + 1) + oldBlockSize;
if ((pNext < (char *)subheap + subheap->size) &&
(*(DWORD *)pNext & ARENA_FLAG_FREE) &&
- (oldSize + (*(DWORD *)pNext & ARENA_SIZE_MASK) + sizeof(ARENA_FREE) >= rounded_size))
+ (oldBlockSize + (*(DWORD *)pNext & ARENA_SIZE_MASK) + sizeof(ARENA_FREE) >= rounded_size))
{
- /* The next block is free and large enough */
ARENA_FREE *pFree = (ARENA_FREE *)pNext;
list_remove( &pFree->entry );
pArena->size += (pFree->size & ARENA_SIZE_MASK) + sizeof(*pFree);
- if (!HEAP_Commit( subheap, pArena, rounded_size ))
- {
- if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->critSection );
- if (flags & HEAP_GENERATE_EXCEPTIONS) RtlRaiseStatus( STATUS_NO_MEMORY );
- RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_NO_MEMORY );
- return NULL;
- }
+
+ if (!HEAP_Commit( subheap, pArena, rounded_size )) goto oom;
+
HEAP_ShrinkBlock( subheap, pArena, rounded_size );
+ mark_block_initialized( pArena + 1, oldActualSize );
}
else /* Do it the hard way */
{
ARENA_FREE *pNew;
ARENA_INUSE *pInUse;
SUBHEAP *newsubheap;
-
if ((flags & HEAP_REALLOC_IN_PLACE_ONLY) ||
!(pNew = HEAP_FindFreeBlock( heapPtr, rounded_size, &newsubheap )))
- {
- if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->critSection );
- if (flags & HEAP_GENERATE_EXCEPTIONS) RtlRaiseStatus( STATUS_NO_MEMORY );
- RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_NO_MEMORY );
- return NULL;
- }
+ goto oom;
/* Build the in-use arena */
+ sizeof(ARENA_FREE) - sizeof(ARENA_INUSE);
pInUse->magic = ARENA_INUSE_MAGIC;
HEAP_ShrinkBlock( newsubheap, pInUse, rounded_size );
- mark_block_initialized( pInUse + 1, oldSize );
- memcpy( pInUse + 1, pArena + 1, oldSize );
+
+ mark_block_initialized( pInUse + 1, oldActualSize );
+ memcpy( pInUse + 1, pArena + 1, oldActualSize );
/* Free the previous block */
pArena = pInUse;
}
}
- else HEAP_ShrinkBlock( subheap, pArena, rounded_size ); /* Shrink the block */
+ else
+ {
+ HEAP_ShrinkBlock( subheap, pArena, rounded_size ); /* Shrink the block */
+ mark_block_initialized( pArena + 1, size );
+ }
pArena->unused_bytes = (pArena->size & ARENA_SIZE_MASK) - size;
/* Clear the extra bytes if needed */
- if (rounded_size > oldSize)
+ if (size > oldActualSize)
{
if (flags & HEAP_ZERO_MEMORY)
- clear_block( (char *)(pArena + 1) + oldSize,
- (pArena->size & ARENA_SIZE_MASK) - oldSize );
+ {
+ clear_block( (char *)(pArena + 1) + oldActualSize, size - oldActualSize );
+ mark_block_uninitialized( (char *)(pArena + 1) + size,
+ (pArena->size & ARENA_SIZE_MASK) - size );
+ }
else
- mark_block_uninitialized( (char *)(pArena + 1) + oldSize,
- (pArena->size & ARENA_SIZE_MASK) - oldSize );
+ mark_block_uninitialized( (char *)(pArena + 1) + oldActualSize,
+ (pArena->size & ARENA_SIZE_MASK) - oldActualSize );
}
/* Return the new arena */
-
- if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->critSection );
-
+done:
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock );
TRACE("(%p,%08lx,%p,%08lx): returning %p\n", heap, flags, ptr, size, pArena + 1 );
return (LPVOID)(pArena + 1);
+
+oom:
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock );
+ if (flags & HEAP_GENERATE_EXCEPTIONS) RtlRaiseStatus( STATUS_NO_MEMORY );
+ RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_NO_MEMORY );
+ TRACE("(%p,%08x,%p,%08lx): returning oom\n", heap, flags, ptr, size );
+ return NULL;
+
+error:
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock );
+ RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER );
+ TRACE("(%p,%08x,%p,%08lx): returning error\n", heap, flags, ptr, size );
+ return NULL;
}
HEAP *heapPtr = HEAP_GetPtr( Heap );
if (!heapPtr)
return FALSE;
- RtlEnterHeapLock( &heapPtr->critSection );
+ RtlEnterHeapLock( &heapPtr->lock );
return TRUE;
}
HEAP *heapPtr = HEAP_GetPtr( Heap );
if (!heapPtr)
return FALSE;
- RtlLeaveHeapLock( &heapPtr->critSection );
+ RtlLeaveHeapLock( &heapPtr->lock );
return TRUE;
}
*
* @implemented
*/
-ULONG NTAPI
+SIZE_T NTAPI
RtlSizeHeap(
HANDLE heap,
ULONG flags,
PVOID ptr
)
{
- SIZE_T ret;
+ SIZE_T ret;
HEAP *heapPtr = HEAP_GetPtr( heap );
if (!heapPtr)
{
RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE );
- return ~0UL;
+ return MAXULONG;
}
flags &= HEAP_NO_SERIALIZE;
flags |= heapPtr->flags;
- if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->critSection );
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->lock );
if (!HEAP_IsRealArena( heapPtr, HEAP_NO_SERIALIZE, ptr, QUIET ))
{
RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER );
- ret = ~0UL;
+ ret = MAXULONG;
}
else
{
- ARENA_INUSE *pArena = (ARENA_INUSE *)ptr - 1;
+ const ARENA_INUSE *pArena = (const ARENA_INUSE *)ptr - 1;
ret = (pArena->size & ARENA_SIZE_MASK) - pArena->unused_bytes;
}
- if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->critSection );
+ if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock );
TRACE("(%p,%08lx,%p): returning %08lx\n", heap, flags, ptr, ret );
return ret;
RtlEnumProcessHeaps(PHEAP_ENUMERATION_ROUTINE HeapEnumerationRoutine,
PVOID lParam)
{
- DPRINT1("UNIMPLEMENTED\n");
- DPRINT1("UNIMPLEMENTED\n");
- DPRINT1("UNIMPLEMENTED\n");
- DPRINT1("UNIMPLEMENTED\n");
- DbgBreakPoint();
- return STATUS_SUCCESS;
-#if 0
- NTSTATUS Status = STATUS_SUCCESS;
- HEAP** pptr;
+ NTSTATUS Status = STATUS_SUCCESS;
- RtlEnterHeapLock(&RtlpProcessHeapsListLock);
+ struct list *ptr = NULL;
+ RtlEnterHeapLock( &processHeap->lock );
+ Status = HeapEnumerationRoutine( processHeap, lParam );
- for (pptr = (HEAP**)&NtCurrentPeb()->ProcessHeaps; *pptr; pptr = &(*pptr)->next)
- {
- Status = HeapEnumerationRoutine(*pptr,lParam);
- if (!NT_SUCCESS(Status))
- break;
- }
+ LIST_FOR_EACH( ptr, &processHeap->entry )
+ {
+ if (!NT_SUCCESS(Status)) break;
+ Status = HeapEnumerationRoutine( LIST_ENTRY( ptr, HEAP, entry ), lParam );
+ }
- RtlLeaveHeapLock(&RtlpProcessHeapsListLock);
+ RtlLeaveHeapLock(&processHeap->lock);
- return Status;
-#endif
+ return Status;
}
{
ULONG total = 1; /* main heap */
struct list *ptr;
-
- RtlEnterHeapLock( &processHeap->critSection );
+ ULONG i=0;
+ RtlEnterHeapLock( &processHeap->lock );
LIST_FOR_EACH( ptr, &processHeap->entry ) total++;
- if (total <= count)
+ if (count > 0) /* write only as many heap pointers as the buffer holds */
{
- *heaps++ = processHeap;
+ *(heaps++) = processHeap;
+ i++;
LIST_FOR_EACH( ptr, &processHeap->entry )
- *heaps++ = LIST_ENTRY( ptr, HEAP, entry );
+ {
+ if (i >= count) break;
+ i++;
+ *(heaps++) = LIST_ENTRY( ptr, HEAP, entry );
+ }
}
- RtlLeaveHeapLock( &processHeap->critSection );
- return total;
+ RtlLeaveHeapLock( &processHeap->lock );
+ return total;
}
BOOLEAN NTAPI
RtlValidateProcessHeaps(VOID)
{
- DPRINT1("UNIMPLEMENTED\n");
- DPRINT1("UNIMPLEMENTED\n");
- DPRINT1("UNIMPLEMENTED\n");
- DPRINT1("UNIMPLEMENTED\n");
- DbgBreakPoint();
- return STATUS_SUCCESS;
-#if 0
BOOLEAN Result = TRUE;
HEAP ** pptr;
- RtlEnterHeapLock(&RtlpProcessHeapsListLock);
+ RtlEnterHeapLock( &processHeap->lock );
- for (pptr = (HEAP**)&NtCurrentPeb()->ProcessHeaps; *pptr; pptr = &(*pptr)->next)
+ for (pptr = (HEAP**)&NtCurrentPeb()->ProcessHeaps; *pptr; pptr++)
{
if (!RtlValidateHeap(*pptr, 0, NULL))
{
}
}
- RtlLeaveHeapLock (&RtlpProcessHeapsListLock);
-
+ RtlLeaveHeapLock( &processHeap->lock );
return Result;
-#endif
}
}
/*
- * @unimplemented
+ * @implemented
*/
BOOLEAN
NTAPI
IN PVOID BaseAddress,
IN PVOID UserValue)
{
- HEAP *heapPtr = HEAP_GetPtr(HeapHandle);
- ARENA_INUSE *pInUse;
- SUBHEAP *subheap;
+ HEAP *heapPtr;
+ PHEAP_USER_DATA udata;
+
+ heapPtr = HEAP_GetPtr(HeapHandle);
+ if (!heapPtr)
+ {
+ RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE );
+ return FALSE;
+ }
+ udata = HEAP_GetUserData(heapPtr, BaseAddress);
+ if (!udata)
+ {
+ udata = HEAP_AllocUserData(heapPtr, BaseAddress);
+ if (!udata) return FALSE;
+ }
+ udata->UserValue = UserValue;
+ return TRUE;
+}
- /* Get the subheap */
- pInUse = (ARENA_INUSE *)BaseAddress - 1;
- subheap = HEAP_FindSubHeap( heapPtr, pInUse );
+/*
+ * @implemented
+ */
+BOOLEAN
+NTAPI
+RtlSetUserFlagsHeap(IN PVOID HeapHandle,
+ IN ULONG Flags,
+ IN PVOID BaseAddress,
+ IN ULONG UserFlagsReset,
+ IN ULONG UserFlagsSet)
+{
+ HEAP *heapPtr;
+ PHEAP_USER_DATA udata;
- /* Hack */
- subheap->UserValue = UserValue;
+ heapPtr = HEAP_GetPtr(HeapHandle);
+ if (!heapPtr)
+ {
+ RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE );
+ return FALSE;
+ }
+ udata = HEAP_GetUserData(heapPtr, BaseAddress);
+ if (!udata)
+ {
+ udata = HEAP_AllocUserData(heapPtr, BaseAddress);
+ if (!udata) return FALSE;
+ }
+ udata->UserFlags = (udata->UserFlags & ~UserFlagsReset) | (UserFlagsSet & HEAP_SETTABLE_USER_FLAGS);
return TRUE;
}
/*
- * @unimplemented
+ * @implemented
*/
BOOLEAN
NTAPI
OUT PVOID *UserValue,
OUT PULONG UserFlags)
{
- HEAP *heapPtr = HEAP_GetPtr(HeapHandle);
- ARENA_INUSE *pInUse;
- SUBHEAP *subheap;
-
- /* Get the subheap */
- pInUse = (ARENA_INUSE *)BaseAddress - 1;
- subheap = HEAP_FindSubHeap( heapPtr, pInUse );
+ HEAP *heapPtr;
+ PHEAP_USER_DATA udata;
- /* Hack */
- DPRINT1("V/F: %lx %p\n", subheap->UserValue, subheap->UserFlags);
- if (UserValue) *UserValue = subheap->UserValue;
- if (UserFlags) *UserFlags = subheap->UserFlags;
+ heapPtr = HEAP_GetPtr(HeapHandle);
+ if (!heapPtr)
+ {
+ RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE );
+ return FALSE;
+ }
+ udata = HEAP_GetUserData(heapPtr, BaseAddress);
+ if (!udata)
+ {
+ RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER );
+ return FALSE;
+ }
+ if (UserValue) *UserValue = udata->UserValue;
+ if (UserFlags) *UserFlags = udata->UserFlags;
return TRUE;
}
RtlExtendHeap(IN HANDLE Heap,
IN ULONG Flags,
IN PVOID P,
- IN ULONG Size)
+ IN SIZE_T Size)
{
/* TODO */
UNIMPLEMENTED;
return 0;
}
-/* EOF */
+NTSTATUS
+NTAPI
+RtlWalkHeap(IN HANDLE HeapHandle,
+ IN PVOID HeapEntry)
+{
+ UNIMPLEMENTED;
+ return STATUS_NOT_IMPLEMENTED;
+}
+
+PVOID
+NTAPI
+RtlProtectHeap(IN PVOID HeapHandle,
+ IN BOOLEAN ReadOnly)
+{
+ UNIMPLEMENTED;
+ return NULL;
+}
+
+NTSTATUS
+NTAPI
+RtlSetHeapInformation(IN HANDLE HeapHandle OPTIONAL,
+ IN HEAP_INFORMATION_CLASS HeapInformationClass,
+ IN PVOID HeapInformation,
+ IN SIZE_T HeapInformationLength)
+{
+ UNIMPLEMENTED;
+ return 0;
+}
+
+NTSTATUS
+NTAPI
+RtlQueryHeapInformation(HANDLE HeapHandle,
+ HEAP_INFORMATION_CLASS HeapInformationClass,
+ PVOID HeapInformation OPTIONAL,
+ SIZE_T HeapInformationLength OPTIONAL,
+ PSIZE_T ReturnLength OPTIONAL)
+{
+ HEAP *heapPtr;
+
+ heapPtr = HEAP_GetPtr(HeapHandle);
+ if (!heapPtr)
+ {
+ RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE );
+ return STATUS_INVALID_HANDLE;
+ }
+
+ UNIMPLEMENTED;
+
+ switch (HeapInformationClass)
+ {
+ case HeapCompatibilityInformation:
+ if (ReturnLength) *ReturnLength = sizeof(ULONG);
+
+ if (HeapInformationLength < sizeof(ULONG))
+ return STATUS_BUFFER_TOO_SMALL;
+
+ *(ULONG *)HeapInformation = 0; /* standard heap */
+ return STATUS_SUCCESS;
+ default:
+ return STATUS_INVALID_INFO_CLASS;
+ }
+}
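+/* Illustrative call (caller-side, not part of this file):
+ *     ULONG compat; SIZE_T len;
+ *     RtlQueryHeapInformation( RtlGetProcessHeap(), HeapCompatibilityInformation,
+ *                              &compat, sizeof(compat), &len );
+ * always yields compat == 0 here, i.e. the standard (non-LFH) heap. */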
+
+NTSTATUS
+NTAPI
+RtlMultipleAllocateHeap(IN PVOID HeapHandle,
+ IN ULONG Flags,
+ IN SIZE_T Size,
+ IN ULONG Count,
+ OUT PVOID *Array)
+{
+ UNIMPLEMENTED;
+ return 0;
+}
+
+NTSTATUS
+NTAPI
+RtlMultipleFreeHeap(IN PVOID HeapHandle,
+ IN ULONG Flags,
+ IN ULONG Count,
+ OUT PVOID *Array)
+{
+ UNIMPLEMENTED;
+ return 0;
+}