mirror of https://github.com/beefytech/Beef.git synced 2025-06-08 11:38:21 +02:00

Added write-past-end detection in debug allocator

This commit is contained in:
Brian Fiete 2022-06-02 10:55:29 -07:00
parent be05439421
commit e623449e24
3 changed files with 156 additions and 23 deletions
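The new checks implement a footer canary: every debug allocation is padded by four extra bytes, a 16-bit guard value (0xBFBF) is written immediately past the requested size, and the offset from that guard to the end of the real, size-class-rounded block is stored in the block's last two bytes. On deallocation the runtime recovers the offset, re-checks the guard, and aborts with a write-past-end error if it has been overwritten. A minimal standalone sketch of the idea, using plain malloc/free and hypothetical helper names rather than the actual Beef runtime code:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

static const uint16_t kGuard = 0xBFBF;

// Hypothetical helper: allocate 'size' bytes plus a 4-byte footer.
// Layout: [user data][uint16 0xBFBF guard][<unused>][uint16 sizeOffset]
void* GuardedAlloc(size_t size)
{
	size_t totalSize = size + 4;                // room for the guard and the offset field
	uint8_t* mem = (uint8_t*)malloc(totalSize); // the real code rounds totalSize up to a tcmalloc size class
	*(uint16_t*)(mem + size) = kGuard;                                // guard sits right past the requested bytes
	*(uint16_t*)(mem + totalSize - 2) = (uint16_t)(totalSize - size); // distance from guard to end of block
	return mem;
}

// Hypothetical helper: verify the guard before releasing the memory.
// The real code derives totalSize from the tcmalloc size class of the block.
void GuardedFree(void* ptr, size_t totalSize)
{
	uint8_t* mem = (uint8_t*)ptr;
	uint16_t sizeOffset = *(uint16_t*)(mem + totalSize - 2);
	if ((sizeOffset < 4) || (sizeOffset >= totalSize))
	{
		fprintf(stderr, "corrupted footer in allocation at %p\n", ptr);
		abort();
	}
	size_t requestedSize = totalSize - sizeOffset;
	if (*(uint16_t*)(mem + requestedSize) != kGuard)
	{
		fprintf(stderr, "write-past-end detected in %zu-byte allocation at %p\n", requestedSize, ptr);
		abort();
	}
	free(mem);
}

Writing even a single byte past the requested size clobbers the low byte of the guard, so the corruption is reported at free time instead of silently damaging whatever happens to sit after the block.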

View file

@@ -292,10 +292,12 @@ bf::System::Object* Internal::Dbg_ObjectAlloc(bf::System::Reflection::TypeInstan
{
	BF_ASSERT((BFRTFLAGS & BfRtFlags_ObjectHasDebugFlags) != 0);
	Object* result;
	intptr allocSize = BF_ALIGN(size, typeInst->mInstAlign);
	//TODO: Why did we align this?
	//intptr allocSize = BF_ALIGN(size, typeInst->mInstAlign);
	intptr allocSize = size;
	uint8* allocBytes = (uint8*)BfObjectAllocate(allocSize, typeInst->_GetType());
	// int dataOffset = (int)(sizeof(intptr) * 2);
	// memset(allocBytes + dataOffset, 0, size - dataOffset);
	result = (bf::System::Object*)allocBytes;
	auto classVData = typeInst->mTypeClassVData;
	(void)classVData;
@@ -343,10 +345,11 @@ bf::System::Object* Internal::Dbg_ObjectAlloc(bf::System::ClassVData* classVData
	bf::System::Object* result;
	if ((BFRTFLAGS & BfRtFlags_LeakCheck) != 0)
	{
		allocSize = BF_ALIGN(allocSize, align);
		//TODO: Why did we align this?
		//intptr allocSize = BF_ALIGN(size, typeInst->mInstAlign);
		intptr allocSize = size;
		uint8* allocBytes = (uint8*)BfObjectAllocate(allocSize, classVData->mType);
		// int dataOffset = (int)(sizeof(intptr) * 2);
		// memset(allocBytes + dataOffset, 0, size - dataOffset);
		result = (bf::System::Object*)(allocBytes);
	}
	else
View file

@@ -526,9 +526,83 @@ void BFCheckSuspended()
	BF_ASSERT((internalThread == NULL) || (!internalThread->mIsSuspended));
}
inline void* BF_do_malloc_pages(ThreadCache* heap, size_t& size)
{
	void* result;
	bool report_large;
	heap->requested_bytes_ += size;
	Length num_pages = TCMALLOC_NAMESPACE::pages(size);
	size = num_pages << kPageShift;
	if ((TCMALLOC_NAMESPACE::FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
		result = DoSampledAllocation(size);
		SpinLockHolder h(Static::pageheap_lock());
		report_large = should_report_large(num_pages);
	}
	else {
		SpinLockHolder h(Static::pageheap_lock());
		Span* span = Static::pageheap()->New(num_pages);
		result = (UNLIKELY(span == NULL) ? NULL : SpanToMallocResult(span));
		report_large = should_report_large(num_pages);
	}
	if (report_large) {
		ReportLargeAlloc(num_pages, result);
	}
	return result;
}
inline void* BF_do_malloc_small(ThreadCache* heap, size_t& size)
{
	if (size == 0)
		size = (int)sizeof(void*);
	ASSERT(Static::IsInited());
	ASSERT(heap != NULL);
	heap->requested_bytes_ += size;
	size_t cl = Static::sizemap()->SizeClass(size);
	size = Static::sizemap()->class_to_size(cl);
	void* result;
	if ((TCMALLOC_NAMESPACE::FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
		result = DoSampledAllocation(size);
	}
	else {
		// The common case, and also the simplest. This just pops the
		// size-appropriate freelist, after replenishing it if it's empty.
		result = CheckedMallocResult(heap->Allocate(size, cl));
	}
	return result;
}
void* BfObjectAllocate(intptr size, bf::System::Type* objType)
{
	bf::System::Object* obj = (bf::System::Object*)tc_malloc(size);
	size_t totalSize = size;
	totalSize += 4; // int16 protectBytes, <unused bytes>, int16 sizeOffset
	void* result;
	if (ThreadCache::have_tls &&
		LIKELY(totalSize < ThreadCache::MinSizeForSlowPath()))
	{
		result = BF_do_malloc_small(ThreadCache::GetCacheWhichMustBePresent(), totalSize);
	}
	else if (totalSize <= kMaxSize)
	{
		result = BF_do_malloc_small(ThreadCache::GetCache(), totalSize);
	}
	else
	{
		result = BF_do_malloc_pages(ThreadCache::GetCache(), totalSize);
	}
	*(uint16*)((uint8*)result + size) = 0xBFBF;
	*(uint16*)((uint8*)result + totalSize - 2) = totalSize - size;
	bf::System::Object* obj = (bf::System::Object*)result;
	BFLOG2(GCLog::EVENT_ALLOC, (intptr)obj, (intptr)objType);
@@ -880,6 +954,34 @@ void BFGC::MarkStatics()
void BFGC::ObjectDeleteRequested(bf::System::Object* obj)
{
	const PageID p = reinterpret_cast<uintptr_t>(obj) >> kPageShift;
	size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
	intptr allocSize = 0;
	if (cl == 0)
	{
		auto span = Static::pageheap()->GetDescriptor(p);
		if (span != NULL)
		{
			cl = span->sizeclass;
			if (cl == 0)
			{
				allocSize = span->length << kPageShift;
			}
		}
	}
	if (cl != 0)
		allocSize = Static::sizemap()->class_to_size(cl);
	int sizeOffset = *(uint16*)((uint8*)obj + allocSize - 2);
	int requestedSize = allocSize - sizeOffset;
	if ((sizeOffset < 4) || (sizeOffset >= allocSize) || (sizeOffset >= kPageSize) ||
		(*(uint16*)((uint8*)obj + requestedSize) != 0xBFBF))
	{
		Beefy::String err = Beefy::StrFormat("Memory deallocation detected write-past-end error in %d-byte object allocation at 0x%@", requestedSize, obj);
		BF_FATAL(err);
		return;
	}
	if (mFreeTrigger >= 0)
	{
		int objSize = BFGetObjectSize(obj);
@@ -1578,20 +1680,10 @@ static void GCObjFree(void* ptr)
		return;
	}
	int size = Static::sizemap()->class_to_size(span->sizeclass);
	int dataOffset = (int)(sizeof(intptr) * 2);
	memset((uint8*)ptr + dataOffset, 0, size - dataOffset);
	// const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
	// size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
	// if (cl == 0)
	// {
	// tc_free(ptr);
	// return;
	// }
	//
	// Span* span = TCGetSpanAt(ptr);
	if (span->freeingObjectsTail == NULL)
	{
		span->freeingObjectsTail = ptr;
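
On the free side the requested size is no longer passed in, so the ObjectDeleteRequested hunk above first recovers the rounded-up allocation size from tcmalloc's metadata (the cached size class, or the span descriptor for large allocations) and only then inspects the footer. Stripped of the GC plumbing, the validation amounts to the following hypothetical distillation (not a function that exists in the runtime):

// 'allocSize' is the rounded-up block size recovered from the size class or span.
static bool ValidateObjectGuard(const uint8_t* obj, size_t allocSize, size_t pageSize)
{
	uint16_t sizeOffset = *(const uint16_t*)(obj + allocSize - 2); // stored at allocation time
	if ((sizeOffset < 4) || (sizeOffset >= allocSize) || (sizeOffset >= pageSize))
		return false;                                              // footer itself looks corrupted
	size_t requestedSize = allocSize - sizeOffset;                 // what the caller originally asked for
	return *(const uint16_t*)(obj + requestedSize) == 0xBFBF;      // guard must still be intact
}

When this fails, the runtime raises the fatal write-past-end error formatted in the hunk above.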

View file

@@ -528,7 +528,7 @@ void BFGC::RawShutdown()
void* BfRawAllocate(intptr size, bf::System::DbgRawAllocData* rawAllocData, void* stackTraceInfo, int stackTraceCount)
{
	size_t totalSize = size;
	totalSize += sizeof(intptr);
	totalSize += sizeof(intptr) + 4; // int16 protectBytes, <unused bytes>, int16 sizeOffset, <stack trace data>, DbgRawAllocData ptr
	if (rawAllocData->mMaxStackTrace == 1)
		totalSize += sizeof(intptr);
	else if (rawAllocData->mMaxStackTrace > 1)
@@ -549,16 +549,26 @@ void* BfRawAllocate(intptr size, bf::System::DbgRawAllocData* rawAllocData, void
		result = BF_do_malloc_pages(ThreadCache::GetCache(), totalSize);
	}
	*(uint16*)((uint8*)result + size) = 0xBFBF;
	uint16* markOffsetPtr = NULL;
	if (rawAllocData->mMaxStackTrace == 1)
	{
		memcpy((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr), stackTraceInfo, sizeof(intptr));
		markOffsetPtr = (uint16*)((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr) - 2);
	}
	else if (rawAllocData->mMaxStackTrace > 1)
	{
		memcpy((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr), &stackTraceCount, sizeof(intptr));
		memcpy((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr) - stackTraceCount*sizeof(intptr), stackTraceInfo, stackTraceCount*sizeof(intptr));
		markOffsetPtr = (uint16*)((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr) - stackTraceCount * sizeof(intptr) - 2);
	}
	else
	{
		markOffsetPtr = (uint16*)((uint8*)result + totalSize - sizeof(intptr) - 2);
	}
	*markOffsetPtr = ((uint8*)markOffsetPtr) - ((uint8*)result + size);
	memcpy((uint8*)result + totalSize - sizeof(intptr), &rawAllocData, sizeof(intptr));
	BfpSystem_InterlockedExchangeAdd32((uint32*)&gRawAllocSize, (uint32)totalSize);
@@ -588,7 +598,7 @@ void BfRawFree(void* ptr)
	if (allocSize == 0)
	{
		Beefy::String err = Beefy::StrFormat("Memory deallocation requested at invalid address %@", ptr);
		Beefy::String err = Beefy::StrFormat("Memory deallocation requested at invalid address 0x%@", ptr);
		BF_FATAL(err);
	}
@@ -600,10 +610,38 @@ void BfRawFree(void* ptr)
	void** dbgAllocDataAddr = (void**)((uint8*)ptr + allocSize - sizeof(void*));
	if (*dbgAllocDataAddr == 0)
	{
		Beefy::String err = Beefy::StrFormat("Memory deallocation requested at %@ but no allocation is recorded. Double delete?", ptr);
		Beefy::String err = Beefy::StrFormat("Memory deallocation requested at 0x%@ but no allocation is recorded. Double delete?", ptr);
		BF_FATAL(err);
		return;
	}
	else if ((*dbgAllocDataAddr == &sObjectAllocData) && ((gBFGC.mMaxRawDeferredObjectFreePercentage != 0) || (!gDeferredFrees.IsEmpty())))
	auto rawAllocData = (bf::System::DbgRawAllocData*)*dbgAllocDataAddr;
	uint16* markOffsetPtr;
	if (rawAllocData->mMaxStackTrace == 1)
	{
		markOffsetPtr = (uint16*)((uint8*)ptr + allocSize - sizeof(intptr) - sizeof(intptr) - 2);
	}
	else if (rawAllocData->mMaxStackTrace > 1)
	{
		int stackTraceCount = *(int*)((uint8*)ptr + allocSize - sizeof(intptr) - sizeof(intptr));
		markOffsetPtr = (uint16*)((uint8*)ptr + allocSize - sizeof(intptr) - sizeof(intptr) - stackTraceCount * sizeof(intptr) - 2);
	}
	else
	{
		markOffsetPtr = (uint16*)((uint8*)ptr + allocSize - sizeof(intptr) - 2);
	}
	int markOffset = *markOffsetPtr;
	if ((markOffset < 2) || (markOffset >= allocSize) || (markOffset >= kPageSize) ||
		(*(uint16*)((uint8*)markOffsetPtr - markOffset) != 0xBFBF))
	{
		int requestedSize = (uint8*)markOffsetPtr - (uint8*)ptr - markOffset;
		Beefy::String err = Beefy::StrFormat("Memory deallocation detected write-past-end error in %d-byte raw allocation at 0x%@", requestedSize, ptr);
		BF_FATAL(err);
		return;
	}
	if ((*dbgAllocDataAddr == &sObjectAllocData) && ((gBFGC.mMaxRawDeferredObjectFreePercentage != 0) || (!gDeferredFrees.IsEmpty())))
	{
		*dbgAllocDataAddr = NULL;
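
In the raw-allocation path the footer is wider, because BfRawAllocate also appends an optional stack trace and a DbgRawAllocData pointer after the guard. Piecing the BfRawAllocate and BfRawFree hunks together, the block layout works out roughly as follows (a descriptive sketch; the <slack> region is whatever the size-class rounding left unused):

// Layout of a raw debug allocation of 'totalSize' bytes starting at 'result',
// as assembled by BfRawAllocate and walked backwards by BfRawFree:
//
//   result + 0                                user data ('size' bytes requested by the caller)
//   result + size                             uint16 guard = 0xBFBF
//   ...                                       <slack> from size-class rounding
//   markOffsetPtr                             uint16 markOffset = markOffsetPtr - (result + size)
//   markOffsetPtr + 2                         stack trace return addresses (when mMaxStackTrace >= 1)
//   result + totalSize - 2 * sizeof(intptr)   intptr stack trace count when mMaxStackTrace > 1
//                                             (holds the single return address when it is 1;
//                                             neither is present when it is 0)
//   result + totalSize - sizeof(intptr)       DbgRawAllocData* rawAllocData
//
// BfRawFree reads rawAllocData from the end of the block, uses its mMaxStackTrace to work out
// how wide the stack trace region is, which locates markOffset, which in turn locates the
// guard; if the guard is no longer 0xBFBF it reports a write-past-end error and aborts.

Because markOffset is only 16 bits and is range-checked against allocSize and kPageSize before use, a corrupted offset field fails the same validation rather than being used to read from an arbitrary address.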