mirror of
https://github.com/beefytech/Beef.git
synced 2025-07-14 12:13:51 +02:00
Initial checkin
This commit is contained in:
parent
c74712dad9
commit
078564ac9e
3242 changed files with 1616395 additions and 0 deletions
615
BeefRT/dbg/DbgInternal.cpp
Normal file
615
BeefRT/dbg/DbgInternal.cpp
Normal file
|
@ -0,0 +1,615 @@
|
|||
#pragma warning(disable:4996)
|
||||
#define HEAPHOOK
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
//#include <crtdefs.h>
|
||||
#include <malloc.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
//#include <intrin.h>
|
||||
|
||||
//#define OBJECT_GUARD_END_SIZE 8
|
||||
#define OBJECT_GUARD_END_SIZE 0
|
||||
|
||||
//#define BF_USE_STOMP_ALLOC 1
|
||||
|
||||
//extern "C"
|
||||
//{
|
||||
//#include "gperftools/stacktrace.h"
|
||||
//}
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#include <intrin.h>
|
||||
#pragma intrinsic(_ReturnAddress)
|
||||
#define BF_RETURN_ADDRESS _ReturnAddress()
|
||||
#else
|
||||
#define BF_RETURN_ADDRESS __builtin_return_address(0)
|
||||
#endif
|
||||
|
||||
#include "BeefySysLib/Common.h"
|
||||
#include "../rt/BfObjects.h"
|
||||
#include "gc.h"
|
||||
#include "../rt/StompAlloc.h"
|
||||
#include "BeefySysLib/platform/PlatformHelper.h"
|
||||
|
||||
USING_NS_BF;
|
||||
|
||||
#ifdef BF_PLATFORM_WINDOWS
|
||||
bf::System::Runtime::BfRtCallbacks gBfRtCallbacks;
|
||||
BfRtFlags gBfRtFlags = (BfRtFlags)0;
|
||||
#endif
|
||||
|
||||
namespace bf
|
||||
{
|
||||
namespace System
|
||||
{
|
||||
class Object;
|
||||
class Exception;
|
||||
|
||||
//System::Threading::Thread* gMainThread;
|
||||
|
||||
class Internal
|
||||
{
|
||||
public:
|
||||
BFRT_EXPORT static intptr Dbg_PrepareStackTrace(intptr baseAllocSize, intptr maxStackTraceDepth);
|
||||
BFRT_EXPORT void Dbg_ReserveMetadataBytes(intptr metadataBytes, intptr& curAllocBytes);
|
||||
BFRT_EXPORT void* Dbg_GetMetadata(System::Object* obj);
|
||||
BFRT_EXPORT static void Dbg_ObjectCreated(bf::System::Object* result, intptr size, bf::System::ClassVData* classVData);
|
||||
BFRT_EXPORT static void Dbg_ObjectCreatedEx(bf::System::Object* result, intptr size, bf::System::ClassVData* classVData);
|
||||
BFRT_EXPORT static void Dbg_ObjectAllocated(bf::System::Object* result, intptr size, bf::System::ClassVData* classVData);
|
||||
BFRT_EXPORT static void Dbg_ObjectAllocatedEx(bf::System::Object* result, intptr size, bf::System::ClassVData* classVData);
|
||||
BFRT_EXPORT static Object* Dbg_ObjectAlloc(bf::System::Reflection::TypeInstance* typeInst, intptr size);
|
||||
BFRT_EXPORT static Object* Dbg_ObjectAlloc(bf::System::ClassVData* classVData, intptr size, intptr align, intptr maxStackTraceDept);
|
||||
BFRT_EXPORT static void Dbg_MarkObjectDeleted(bf::System::Object* obj);
|
||||
BFRT_EXPORT static void Dbg_ObjectStackInit(bf::System::Object* result, bf::System::ClassVData* classVData);
|
||||
BFRT_EXPORT static void Dbg_ObjectPreDelete(bf::System::Object* obj);
|
||||
BFRT_EXPORT static void Dbg_ObjectPreCustomDelete(bf::System::Object* obj);
|
||||
|
||||
BFRT_EXPORT static void* Dbg_RawMarkedAlloc(intptr size, void* markFunc);
|
||||
BFRT_EXPORT static void* Dbg_RawMarkedArrayAlloc(intptr elemCount, intptr elemSize, void* markFunc);
|
||||
BFRT_EXPORT static void* Dbg_RawAlloc(intptr size);
|
||||
BFRT_EXPORT static void* Dbg_RawObjectAlloc(intptr size);
|
||||
|
||||
BFRT_EXPORT static void* Dbg_RawAlloc(intptr size, DbgRawAllocData* rawAllocData);
|
||||
BFRT_EXPORT static void Dbg_RawFree(void* ptr);
|
||||
};
|
||||
|
||||
namespace IO
|
||||
{
|
||||
class File
|
||||
{
|
||||
private:
|
||||
BFRT_EXPORT static bool Exists(char* fileName);
|
||||
};
|
||||
|
||||
class Directory
|
||||
{
|
||||
private:
|
||||
BFRT_EXPORT static bool Exists(char* fileName);
|
||||
};
|
||||
}
|
||||
|
||||
namespace Diagnostics
|
||||
{
|
||||
namespace Contracts
|
||||
{
|
||||
class Contract
|
||||
{
|
||||
public:
|
||||
enum ContractFailureKind : uint8
|
||||
{
|
||||
ContractFailureKind_Precondition,
|
||||
//[SuppressMessage("Microsoft.Naming", "CA1704:IdentifiersShouldBeSpelledCorrectly", MessageId = "Postcondition")]
|
||||
ContractFailureKind_Postcondition,
|
||||
//[SuppressMessage("Microsoft.Naming", "CA1704:IdentifiersShouldBeSpelledCorrectly", MessageId = "Postcondition")]
|
||||
ContractFailureKind_PostconditionOnException,
|
||||
ContractFailureKind_Invariant,
|
||||
ContractFailureKind_Assert,
|
||||
ContractFailureKind_Assume,
|
||||
};
|
||||
|
||||
private:
|
||||
BFRT_EXPORT static void ReportFailure(ContractFailureKind failureKind, char* userMessage, int userMessageLen, char* conditionText, int conditionTextLen);
|
||||
};
|
||||
}
|
||||
|
||||
class Debug
|
||||
{
|
||||
private:
|
||||
BFRT_EXPORT static void Write(char* str, intptr strLen);
|
||||
};
|
||||
}
|
||||
|
||||
namespace FFI
|
||||
{
|
||||
enum FFIABI : int32;
|
||||
enum FFIResult : int32;
|
||||
|
||||
struct FFIType;
|
||||
|
||||
struct FFILIB
|
||||
{
|
||||
struct FFICIF;
|
||||
|
||||
BFRT_EXPORT static void* ClosureAlloc(intptr size, void** outFunc);
|
||||
BFRT_EXPORT static FFIResult PrepCif(FFICIF* cif, FFIABI abi, int32 nargs, FFIType* rtype, FFIType** argTypes);
|
||||
BFRT_EXPORT static void Call(FFICIF* cif, void* funcPtr, void* rvalue, void** args);
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//#define BF_TRACK_SIZES 1
|
||||
|
||||
#if BF_TRACK_SIZES
|
||||
static int sAllocSizes[1024 * 1024];
|
||||
static int sHighestId = 0;
|
||||
#endif
|
||||
|
||||
using namespace bf::System;
|
||||
|
||||
/*static void* MallocHook(size_t size, const void *caller)
|
||||
{
|
||||
printf("MallocHook\n");
|
||||
return NULL;
|
||||
}*/
|
||||
|
||||
|
||||
/*static int __cdecl HeapHook(int a, size_t b, void* c, void** d)
|
||||
{
|
||||
printf("Heap Hook\n");
|
||||
return 0;
|
||||
}*/
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
|
||||
Beefy::StringT<0> gDbgErrorString;
|
||||
|
||||
extern DbgRawAllocData sEmptyAllocData;
|
||||
extern DbgRawAllocData sObjectAllocData;
|
||||
|
||||
#define SETUP_ERROR(str, skip) gDbgErrorString = str; gBfRtCallbacks.DebugMessageData_SetupError(str, skip)
|
||||
|
||||
#ifdef BF_PLATFORM_WINDOWS
|
||||
#define BF_CAPTURE_STACK(skipCount, outFrames, wantCount) (int)RtlCaptureStackBackTrace(skipCount, wantCount, (void**)outFrames, NULL)
|
||||
#else
|
||||
#define BF_CAPTURE_STACK(skipCount, outFrames, wantCount) BfpStack_CaptureBackTrace(skipCount, outFrames, wantCount)
|
||||
#endif
|
||||
|
||||
|
||||
static void GetCrashInfo()
|
||||
{
|
||||
if (!gDbgErrorString.IsEmpty())
|
||||
{
|
||||
Beefy::String debugStr;
|
||||
debugStr += "Beef Error: ";
|
||||
debugStr += gDbgErrorString;
|
||||
BfpSystem_AddCrashInfo(debugStr.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
// Initializes the debug runtime: validates that the compiled-in BFRT_VERSION
// matches what the Beef-compiled program was built against, then records the
// runtime flags and callback table supplied by the generated startup code.
// Fatal-errors (no return) on a version mismatch.
void bf::System::Runtime::Dbg_Init(int version, int flags, BfRtCallbacks* callbacks)
{
	//BfpSystem_Init(BFP_VERSION, BfpSystemInitFlag_None);

	if (version != BFRT_VERSION)
	{
		BfpSystem_FatalError(StrFormat("BeefDbg build version '%d' does not match requested version '%d'", BFRT_VERSION, version).c_str(), "BEEF FATAL ERROR");
	}

	// Copy the callback table by value; the caller's storage may not outlive us.
	gBfRtCallbacks = *callbacks;
	gBfRtFlags = (BfRtFlags)flags;
#ifdef BF_GC_SUPPORTED
	// Mirror the flags into the GC-visible data block used by the debugger.
	gGCDbgData.mDbgFlags = gBfRtFlags;
#endif
}
|
||||
|
||||
// Returns the address of the GetCrashInfo callback as an opaque void*.
// The *(void**)& cast converts the function pointer to a data pointer without
// invoking it (plain (void*) casts of function pointers are non-standard).
void* bf::System::Runtime::Dbg_GetCrashInfoFunc()
{
	return *(void**)&GetCrashInfo;
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
// Flags an object as deleted for leak-check builds: clears StackAlloc, sets
// Deleted, and hands the object to the GC so its memory is reclaimed on the
// next sweep (the GC owns the actual free).
void Internal::Dbg_MarkObjectDeleted(bf::System::Object* object)
{
	BF_ASSERT((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0);
	if ((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0)
		object->mObjectFlags = (BfObjectFlags)((object->mObjectFlags & ~BfObjectFlag_StackAlloc) | BfObjectFlag_Deleted);
#ifdef BF_GC_SUPPORTED
	gBFGC.ObjectDeleteRequested(object);
#endif
}
|
||||
|
||||
int GetStackTrace(void **result, int max_depth, int skip_count);

void BfLog(const char* fmt ...);

static const int cMaxStackTraceCount = 1024;

// Per-thread scratch state between Dbg_PrepareStackTrace (which captures the
// stack) and the subsequent *Ex object-creation hook (which copies it into the
// object's trailing alloc-info area).
struct PendingAllocState
{
	bool mHasData;
	void* mStackTrace[cMaxStackTraceCount];
	int mStackTraceCount;
	int mMetadataBytes;       // extra metadata bytes reserved via Dbg_ReserveMetadataBytes
	bool mIsLargeAlloc;       // true => 'large' alloc-info layout (counts stored at object end)

	// Returns true if this allocation still fits the 'short' alloc-info
	// encoding: trace count and metadata size each fit in 8 bits, and the
	// object size fits in the packed mDbgAllocInfo field.
	bool IsSmall(intptr curAllocBytes)
	{
		if ((mStackTraceCount > 255) || (mMetadataBytes > 255))
			return false;

		// Largest object size representable in the short (packed) encoding.
		const intptr maxSmallObjectSize = ((intptr)1 << ((sizeof(intptr) - 2) * 8)) - 1;
		if (curAllocBytes <= maxSmallObjectSize)
			return true;

		// curAllocBytes includes the appended trace/metadata; check whether the
		// object payload itself is still within the small limit.
		intptr objBytes = curAllocBytes - mStackTraceCount * sizeof(intptr) - mMetadataBytes;
		return (objBytes < maxSmallObjectSize);
	}
};

static __thread PendingAllocState gPendingAllocState = { 0 };
||||
|
||||
// Reserves extra per-object metadata space: bumps both the thread-local
// metadata count and the caller's running allocation size.
void Internal::Dbg_ReserveMetadataBytes(intptr metadataBytes, intptr& curAllocBytes)
{
	bool isSmall = gPendingAllocState.IsSmall(curAllocBytes);
	gPendingAllocState.mMetadataBytes += (int)metadataBytes;
	curAllocBytes += metadataBytes;
	if ((isSmall) && (gPendingAllocState.mMetadataBytes > 255))
	{
		// We just transitioned from 'small' to 'not small': the large encoding
		// needs one extra intptr at the end of the object for the trace count.
		curAllocBytes += sizeof(intptr);
	}
}
|
||||
|
||||
// Not yet implemented: always returns NULL. Callers must tolerate a null
// metadata pointer.
void* Internal::Dbg_GetMetadata(bf::System::Object* obj)
{
	return NULL;
}
|
||||
|
||||
// Captures the current call stack into thread-local gPendingAllocState and
// returns the number of EXTRA bytes the upcoming allocation must reserve to
// store it. Chooses the 'large' layout (extra leading count word) when the
// trace is deeper than 255 frames or the base size exceeds the packed-field
// limit; otherwise the compact layout. Returns 0 when no trace is wanted.
intptr Internal::Dbg_PrepareStackTrace(intptr baseAllocSize, intptr maxStackTraceDepth)
{
	int allocSize = 0;
	if (maxStackTraceDepth > 1)
	{
		// Skip 1 frame (this function); clamp to the TLS buffer capacity.
		int capturedTraceCount = BF_CAPTURE_STACK(1, (intptr*)gPendingAllocState.mStackTrace, min((int)maxStackTraceDepth, 1024));
		gPendingAllocState.mStackTraceCount = capturedTraceCount;
		// Largest object size representable in the short packed encoding.
		const intptr maxSmallObjectSize = ((intptr)1 << ((sizeof(intptr) - 2) * 8)) - 1;
		if ((capturedTraceCount > 255) || (baseAllocSize >= maxSmallObjectSize))
		{
			gPendingAllocState.mIsLargeAlloc = true;
			// +1 intptr for the trace-count word stored before the frames.
			allocSize += (1 + capturedTraceCount) * sizeof(intptr);
		}
		else
		{
			gPendingAllocState.mIsLargeAlloc = false;
			allocSize += capturedTraceCount * sizeof(intptr);
		}
	}
	return allocSize;
}
|
||||
|
||||
// Reflection-driven object allocation (leak-check builds): allocates via the
// GC heap using the type's alignment, then publishes the object to the GC in
// a carefully fenced order (see comments in the ClassVData overload).
bf::System::Object* Internal::Dbg_ObjectAlloc(bf::System::Reflection::TypeInstance* typeInst, intptr size)
{
	BF_ASSERT((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0);
	Object* result;
	int allocSize = BF_ALIGN(size, typeInst->mInstAlign);
	uint8* allocBytes = (uint8*)BfObjectAllocate(allocSize, typeInst->GetType());
	// int dataOffset = (int)(sizeof(intptr) * 2);
	// memset(allocBytes + dataOffset, 0, size - dataOffset);
	result = (bf::System::Object*)allocBytes;
	auto classVData = typeInst->mTypeClassVData;

#ifndef BFRT_NODBGFLAGS
	// Record the caller's return address as the (single-frame) alloc trace.
	intptr dbgAllocInfo = (intptr)BF_RETURN_ADDRESS;
	result->mClassVData = (intptr)classVData | (intptr)BfObjectFlag_Allocated /*| BFGC::sAllocFlags*/;
	BF_FULL_MEMORY_FENCE(); // Since we depend on mDbAllocInfo to determine if we are allocated, we need to set this last after we're set up
	result->mDbgAllocInfo = dbgAllocInfo;
	BF_FULL_MEMORY_FENCE();
	// Apply the GC's current mark flags only after the object is fully formed,
	// so a concurrent sweep can't misclassify a half-initialized object.
	result->mClassVData = (result->mClassVData & ~BF_OBJECTFLAG_MARK_ID_MASK) | BFGC::sAllocFlags;
#endif
	return result;
}
|
||||
|
||||
//#define DBG_OBJECTEND
|
||||
|
||||
// Main debug object-allocation path. Optionally captures an allocation stack
// trace (appended after the object payload), selects the allocator based on
// runtime flags (GC leak-check heap, stomp allocator, size-tracking malloc,
// debug raw heap, or the user Alloc callback), then publishes the object to
// the GC with fenced writes. The store ordering here is load-bearing; do not
// reorder (see the in-body comment block).
bf::System::Object* Internal::Dbg_ObjectAlloc(bf::System::ClassVData* classVData, intptr size, intptr align, intptr maxStackTraceDepth)
{
	void* stackTrace[1024];
	int capturedTraceCount = 0;
	intptr allocSize = size;
	bool largeAllocInfo = false;  // true => trace count stored at object end

	if ((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0)
	{
		if (maxStackTraceDepth > 1)
		{
			// Skip 1 frame (this function); clamp to the local buffer size.
			capturedTraceCount = BF_CAPTURE_STACK(1, (intptr*)stackTrace, min((int)maxStackTraceDepth, 1024));
			// Largest size representable in the short packed mDbgAllocInfo.
			const intptr maxSmallObjectSize = ((intptr)1 << ((sizeof(intptr) - 2) * 8)) - 1;
			if ((capturedTraceCount > 255) || (size >= maxSmallObjectSize))
			{
				largeAllocInfo = true;
				// +1 intptr for the trace-count word preceding the frames.
				allocSize += (1 + capturedTraceCount) * sizeof(intptr);
			}
			else
				allocSize += capturedTraceCount * sizeof(intptr);
		}
	}

#ifdef DBG_OBJECTEND
	// Room for the 0xBFBFBFBF overwrite-detection sentinel.
	allocSize += 4;
#endif

	bf::System::Object* result;
	if ((gBfRtFlags & BfRtFlags_LeakCheck) != 0)
	{
		// Leak-check: allocate from the GC object heap so sweeps can find it.
		allocSize = BF_ALIGN(allocSize, align);
		uint8* allocBytes = (uint8*)BfObjectAllocate(allocSize, classVData->mType);
		// int dataOffset = (int)(sizeof(intptr) * 2);
		// memset(allocBytes + dataOffset, 0, size - dataOffset);
		result = (bf::System::Object*)(allocBytes);
	}
	else
	{
#if BF_USE_STOMP_ALLOC
		// Stomp allocator: page-guard allocation for use-after-free detection.
		result = (bf::System::Object*)StompAlloc(allocSize);
#elif BF_TRACK_SIZES
		// Size-tracking build: tally bytes per type id; 16-byte header holds size.
		sHighestId = BF_MAX(sHighestId, classVData->mType->mTypeId);
		uint8* allocPtr = (uint8*)malloc(size + 16);
		*((int*)allocPtr) = size;
		sAllocSizes[classVData->mType->mTypeId] += size;
		result = (bf::System::Object*)(allocPtr + 16);
#else
		if ((gBfRtFlags & BfRtFlags_DebugAlloc) != 0)
		{
			uint8* allocBytes = (uint8*)BfRawAllocate(allocSize, &sObjectAllocData, NULL, 0);
			result = (bf::System::Object*)allocBytes;
		}
		else
		{
			uint8* allocBytes = (uint8*)gBfRtCallbacks.Alloc(allocSize);
			result = (bf::System::Object*)allocBytes;
		}
#endif
	}

#ifndef BFRT_NODBGFLAGS
	if ((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0)
	{
		// The order is very important here-
		// Once we set mDbgAllocInfo, the memory will be recognized by the GC as being a valid object.
		// There's a race condition with the alloc flags, however-- if the GC pauses our thread after we write
		// the mClassVData but before mDbgAllocInfo is set, then the object won't be marked with the new mark id
		// and it will appear as a leak during the Sweep. Thus, we leave the sAllocFlags off until AFTER
		// mDbgAllocInfo is set, so that the object will be ignored during sweeping unless we get all the way
		// through.

		intptr dbgAllocInfo;
		auto classVDataVal = (intptr)classVData | (intptr)BfObjectFlag_Allocated;
		result->mClassVData = classVDataVal;
		if (maxStackTraceDepth <= 1)
			dbgAllocInfo = (intptr)BF_RETURN_ADDRESS;
		else
		{
			if (largeAllocInfo)
			{
				// Large layout: mDbgAllocInfo holds the raw size; the count and
				// frames live after the object payload.
				result->mClassVData |= (intptr)BfObjectFlag_AllocInfo;
				dbgAllocInfo = size;
				*(intptr*)((uint8*)result + size) = capturedTraceCount;
				memcpy((uint8*)result + size + sizeof(intptr), stackTrace, capturedTraceCount * sizeof(intptr));
			}
			else
			{
				// Short layout: size and frame count packed into mDbgAllocInfo.
				result->mClassVData |= (intptr)BfObjectFlag_AllocInfo_Short;
				dbgAllocInfo = (size << 16) | capturedTraceCount;
				memcpy((uint8*)result + size, stackTrace, capturedTraceCount * sizeof(intptr));
			}
		}
		BF_FULL_MEMORY_FENCE(); // Since we depend on mDbAllocInfo to determine if we are allocated, we need to set this last after we're set up
		result->mDbgAllocInfo = dbgAllocInfo;
		BF_FULL_MEMORY_FENCE();
		// If the GC has already set the correct mark id then we don't need want to overwrite it - we could have an old value
		BfpSystem_InterlockedCompareExchangePtr((uintptr*)&result->mClassVData, (uintptr)classVDataVal, classVDataVal | BFGC::sAllocFlags);
		//result->mClassVData = (result->mClassVData & ~BF_OBJECTFLAG_MARK_ID_MASK) | BFGC::sAllocFlags;
	}
	else
#endif
		result->mClassVData = (intptr)classVData;

	//OutputDebugStrF("Object %@ ClassVData %@\n", result, classVData);

#ifdef DBG_OBJECTEND
	*(uint32*)((uint8*)result + size) = 0xBFBFBFBF;
#endif

	return result;
}
|
||||
|
||||
// Initializes the debug header of a STACK-allocated object: marks it
// StackAlloc (so delete attempts are caught) and records the caller's return
// address as its allocation site.
void Internal::Dbg_ObjectStackInit(bf::System::Object* result, bf::System::ClassVData* classVData)
{
	BF_ASSERT((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0);

	result->mClassVData = (intptr)classVData | (intptr)BfObjectFlag_StackAlloc;
#ifndef BFRT_NODBGFLAGS
	result->mDbgAllocInfo = (intptr)BF_RETURN_ADDRESS;
#endif
}
|
||||
|
||||
// Copies the stack trace captured by Dbg_PrepareStackTrace (thread-local
// gPendingAllocState) into the object's alloc-info area and encodes
// mDbgAllocInfo accordingly. origSize is the object payload size; the trace
// bytes were already reserved past it by the caller.
static void SetupDbgAllocInfo(bf::System::Object* result, intptr origSize)
{
#ifndef BFRT_NODBGFLAGS
	if (gPendingAllocState.mStackTraceCount == 0)
	{
		result->mDbgAllocInfo = 1; // Must have a value
		return;
	}
	if (gPendingAllocState.mStackTraceCount == 1)
	{
		// Single frame fits directly in mDbgAllocInfo; no trailing data.
		result->mDbgAllocInfo = (intptr)gPendingAllocState.mStackTrace[0];
		return;
	}
	if (gPendingAllocState.mIsLargeAlloc)
	{
		// Large layout: count word then frames, stored after the payload.
		result->mClassVData |= (intptr)BfObjectFlag_AllocInfo;
		result->mDbgAllocInfo = origSize;
		*(intptr*)((uint8*)result + origSize) = gPendingAllocState.mStackTraceCount;
		memcpy((uint8*)result + origSize + sizeof(intptr), gPendingAllocState.mStackTrace, gPendingAllocState.mStackTraceCount * sizeof(intptr));
	}
	else
	{
		// Short layout: size and count packed into mDbgAllocInfo; frames only.
		result->mClassVData |= (intptr)BfObjectFlag_AllocInfo_Short;
		result->mDbgAllocInfo = (origSize << 16) | gPendingAllocState.mStackTraceCount;
		memcpy((uint8*)result + origSize, gPendingAllocState.mStackTrace, gPendingAllocState.mStackTraceCount * sizeof(intptr));
	}
#endif
}
|
||||
|
||||
// Post-construction hook for already-allocated objects: verifies the vdata
// was set by the allocator (modulo the Allocated/Mark3 flag bits), then
// stamps the caller's return address as the allocation site.
void Internal::Dbg_ObjectCreated(bf::System::Object* result, intptr size, bf::System::ClassVData* classVData)
{
	BF_ASSERT((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0);
#ifndef BFRT_NODBGFLAGS
	BF_ASSERT_REL((result->mClassVData & ~(BfObjectFlag_Allocated | BfObjectFlag_Mark3)) == (intptr)classVData);
	result->mDbgAllocInfo = (intptr)BF_RETURN_ADDRESS;
#endif
}
|
||||
|
||||
// Extended variant of Dbg_ObjectCreated: instead of a single return address,
// attaches the full stack trace previously captured into gPendingAllocState.
void Internal::Dbg_ObjectCreatedEx(bf::System::Object* result, intptr origSize, bf::System::ClassVData* classVData)
{
	BF_ASSERT((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0);
#ifndef BFRT_NODBGFLAGS
	BF_ASSERT_REL((result->mClassVData & ~(BfObjectFlag_Allocated | BfObjectFlag_Mark3)) == (intptr)classVData);
	SetupDbgAllocInfo(result, origSize);
#endif
}
|
||||
|
||||
// Initializes the debug header of an object allocated by external means:
// sets the vdata pointer (no flag bits) and records the caller's return
// address as the allocation site.
void Internal::Dbg_ObjectAllocated(bf::System::Object* result, intptr size, bf::System::ClassVData* classVData)
{
	BF_ASSERT((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0);
	result->mClassVData = (intptr)classVData;
#ifndef BFRT_NODBGFLAGS
	result->mDbgAllocInfo = (intptr)BF_RETURN_ADDRESS;
#endif
}
|
||||
|
||||
// Extended variant of Dbg_ObjectAllocated: attaches the full stack trace from
// gPendingAllocState instead of a single return address.
void Internal::Dbg_ObjectAllocatedEx(bf::System::Object* result, intptr origSize, bf::System::ClassVData* classVData)
{
	BF_ASSERT((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0);
	result->mClassVData = (intptr)classVData;
	SetupDbgAllocInfo(result, origSize);
}
|
||||
|
||||
// Validates an object immediately before 'delete': rejects deletion of
// stack-allocated, append-allocated, custom-allocated, already-deleted, and
// (on Windows) read-only objects. On violation, reports a fatal error through
// the runtime callbacks; does not return normally in that case unless the
// debugger continues past it.
void Internal::Dbg_ObjectPreDelete(bf::System::Object* object)
{
	BF_ASSERT((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0);

#ifndef BFRT_NODBGFLAGS
	const char* errorPtr = NULL;

	if ((object->mObjectFlags & BfObjectFlag_StackAlloc) != 0)
	{
		if ((object->mObjectFlags & BfObjectFlag_Allocated) == 0)
			errorPtr = "Attempting to delete stack-allocated object";
		else
			errorPtr = "Deleting an object that was detected as leaked (internal error)";
	}
	else if ((object->mObjectFlags & BfObjectFlag_AppendAlloc) != 0)
		errorPtr = "Attempting to delete append-allocated object, use 'delete append' statement instead of 'delete'";
	else if ((object->mObjectFlags & BfObjectFlag_Allocated) == 0)
	{
		errorPtr = "Attempting to delete custom-allocated object without specifying allocator";
#if _WIN32
		// Refine the message: if the object lives in read-only memory (e.g. a
		// const/static image), report that instead.
		MEMORY_BASIC_INFORMATION stackInfo = { 0 };
		VirtualQuery(object, &stackInfo, sizeof(MEMORY_BASIC_INFORMATION));
		if ((stackInfo.Protect & PAGE_READONLY) != 0)
			errorPtr = "Attempting to delete read-only object";
#endif
	}
	else if ((object->mObjectFlags & BfObjectFlag_Deleted) != 0)
		errorPtr = "Attempting second delete on object";

	if (errorPtr != NULL)
	{
		Beefy::String errorStr = errorPtr;

		// "\x1" separates the message from machine-readable leak data the IDE
		// parses; %@ is the runtime's pointer-format specifier.
		Beefy::String typeName = object->GetTypeName();
		errorStr += "\x1";
		errorStr += StrFormat("LEAK\t0x%@\n", object);
		errorStr += StrFormat("   (%s)0x%@\n", typeName.c_str(), object);
		SETUP_ERROR(errorStr.c_str(), 2);
		BF_DEBUG_BREAK();
		gBfRtCallbacks.DebugMessageData_Fatal();
		return;
	}
#endif
}
|
||||
|
||||
// Validation before a custom-allocator delete: only stack-allocation and
// double-delete are errors here (custom allocators legitimately delete
// objects that lack the Allocated flag).
// NOTE(review): the second check is an independent 'if', not 'else if' — a
// deleted stack object reports the double-delete message; appears intentional
// (most specific error wins) but confirm against upstream.
void Internal::Dbg_ObjectPreCustomDelete(bf::System::Object* object)
{
	BF_ASSERT((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0);

	const char* errorPtr = NULL;

	if ((object->mObjectFlags & BfObjectFlag_StackAlloc) != 0)
		errorPtr = "Attempting to delete stack-allocated object";
	if ((object->mObjectFlags & BfObjectFlag_Deleted) != 0)
		errorPtr = "Attempting second delete on object";

	if (errorPtr != NULL)
	{
		Beefy::String errorStr = errorPtr;

		// Same IDE-parseable leak report format as Dbg_ObjectPreDelete.
		Beefy::String typeName = object->GetTypeName();
		errorStr += "\x1";
		errorStr += StrFormat("LEAK\t0x%@\n", object);
		errorStr += StrFormat("   (%s)0x%@\n", typeName.c_str(), object);
		SETUP_ERROR(errorStr.c_str(), 2);
		BF_DEBUG_BREAK();
		gBfRtCallbacks.DebugMessageData_Fatal();
		return;
	}
}
|
||||
|
||||
// Raw (non-object) tracked allocation. Depending on rawAllocData's policy,
// records either the caller's return address (mMaxStackTrace == 1) or a full
// captured backtrace (> 1) alongside the allocation for leak reporting.
void* Internal::Dbg_RawAlloc(intptr size, DbgRawAllocData* rawAllocData)
{
	void* stackTrace[1024];
	int capturedTraceCount = 0;
#ifndef BFRT_NODBGFLAGS
	if (rawAllocData->mMaxStackTrace == 1)
	{
		stackTrace[0] = BF_RETURN_ADDRESS;
		capturedTraceCount = 1;
	}
	else if (rawAllocData->mMaxStackTrace > 1)
	{
		// Skip 1 frame (this function); clamp to the local buffer size.
		capturedTraceCount = BF_CAPTURE_STACK(1, (intptr*)stackTrace, min(rawAllocData->mMaxStackTrace, 1024));
	}
#endif
	return BfRawAllocate(size, rawAllocData, stackTrace, capturedTraceCount);
}
|
||||
|
||||
// Raw allocation with a GC mark function. NOTE(review): markFunc is currently
// ignored; the allocation is tagged with the shared empty alloc data and no
// stack trace.
void* Internal::Dbg_RawMarkedAlloc(intptr size, void* markFunc)
{
	return BfRawAllocate(size, &sEmptyAllocData, NULL, 0);
}
|
||||
|
||||
// Raw array allocation with a GC mark function. NOTE(review): markFunc is
// currently ignored, matching Dbg_RawMarkedAlloc. No overflow check on
// elemCount * elemStride — assumed validated by the caller.
void* Internal::Dbg_RawMarkedArrayAlloc(intptr elemCount, intptr elemStride, void* markFunc)
{
	return BfRawAllocate(elemCount * elemStride, &sEmptyAllocData, NULL, 0);
}
|
||||
|
||||
// Untracked-policy raw allocation (no mark function, no stack trace).
void* Internal::Dbg_RawAlloc(intptr size)
{
	return BfRawAllocate(size, &sEmptyAllocData, NULL, 0);
}
|
||||
|
||||
// Raw allocation tagged as object memory (sObjectAllocData), so the debug GC
// scans it for object references.
void* Internal::Dbg_RawObjectAlloc(intptr size)
{
	return BfRawAllocate(size, &sObjectAllocData, NULL, 0);
}
|
||||
|
||||
// Frees memory obtained from any of the Dbg_Raw*Alloc entry points.
void Internal::Dbg_RawFree(void* ptr)
{
	BfRawFree(ptr);
}
|
48
BeefRT/dbg/DbgThread.cpp
Normal file
48
BeefRT/dbg/DbgThread.cpp
Normal file
|
@ -0,0 +1,48 @@
|
|||
#include "../rt/Thread.h"
|
||||
#include "DbgThread.h"
|
||||
|
||||
USING_NS_BF;
|
||||
|
||||
using namespace bf::System;
|
||||
using namespace bf::System::Threading;
|
||||
|
||||
// Creates the debug-runtime internal-thread record for a managed Thread.
// Only valid in leak-check builds. Ownership of the BfDbgInternalThread is
// transferred to SetInternalThread.
void Thread::Dbg_CreateInternal()
{
	BF_ASSERT((gBfRtFlags & BfRtFlags_LeakCheck) != 0);

	auto internalThread = new BfDbgInternalThread();
	SetInternalThread(internalThread);
}
|
||||
|
||||
// Default-constructs the debug internal-thread record. All previous member
// initialization is retired (commented out); base-class state is left to
// BfInternalThread.
BfDbgInternalThread::BfDbgInternalThread()
{
//	mBFIThreadData = NULL;
//	mTCMallocObjThreadCache = NULL;
//	mReadCheckCount = 0;
//	mLastGCScanIdx = 0;
//	mLastStackScanIdx = 0;
//	mSectionDepth = 0;
}

// Nothing to release; defined out-of-line to anchor the vtable.
BfDbgInternalThread::~BfDbgInternalThread()
{

}
|
||||
|
||||
// Called when the OS thread begins running: marks the record live and, in
// leak-check builds, registers the thread with the GC so its stack is scanned.
void BfDbgInternalThread::ThreadStarted()
{
	// NOTE(review): threadPriority is never read; the BfpThread_GetPriority
	// call appears vestigial — confirm it has no required side effect before
	// removing.
	int threadPriority = BfpThread_GetPriority(mThreadHandle, NULL);
	mRunning = true;
	if ((gBfRtFlags & BfRtFlags_LeakCheck) != 0)
		gBFGC.ThreadStarted(this);
}
|
||||
|
||||
// Called when the OS thread finishes: clears the running flag and unregisters
// from the GC in leak-check builds.
void BfDbgInternalThread::ThreadStopped()
{
	mRunning = false;
	if ((gBfRtFlags & BfRtFlags_LeakCheck) != 0)
	{
		// Don't access thread after ThreadStopped -- the thread may be deleted
		gBFGC.ThreadStopped(this);
	}
}
|
18
BeefRT/dbg/DbgThread.h
Normal file
18
BeefRT/dbg/DbgThread.h
Normal file
|
@ -0,0 +1,18 @@
|
|||
#pragma once
|
||||
|
||||
#include "../rt/Thread.h"
|
||||
#include "gc.h"
|
||||
|
||||
// Forward declaration only; the object-heap thread cache lives in the
// tcmalloc-derived allocator.
namespace tcmalloc_obj
{
	class ThreadCache;
}

// Debug-runtime per-thread record: hooks thread start/stop so the GC can
// register and unregister the thread's stack for scanning.
class BfDbgInternalThread : public BfInternalThread
{
public:
	BfDbgInternalThread();
	virtual ~BfDbgInternalThread();
	virtual void ThreadStarted() override;
	virtual void ThreadStopped() override;
};
|
26
BeefRT/dbg/dbgmain.cpp
Normal file
26
BeefRT/dbg/dbgmain.cpp
Normal file
|
@ -0,0 +1,26 @@
|
|||
#include "gc.h"
|
||||
|
||||
#ifdef BF_PLATFORM_WINDOWS
|
||||
|
||||
// DLL entry point (Windows builds): notifies the GC on every thread attach /
// detach so thread stacks are tracked for scanning. PROCESS_ATTACH/DETACH
// handle the thread that loads/unloads the DLL, which never receives
// THREAD_ATTACH/DETACH notifications.
BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{
	switch (fdwReason)
	{
	case DLL_PROCESS_ATTACH:
		gBFGC.ThreadStarted();
		break;
	case DLL_THREAD_ATTACH:
		gBFGC.ThreadStarted();
		break;
	case DLL_THREAD_DETACH:
		gBFGC.ThreadStopped();
		break;
	case DLL_PROCESS_DETACH:
		gBFGC.ThreadStopped();
		break;
	}

	return TRUE;
}
|
||||
|
||||
#endif
|
2774
BeefRT/dbg/gc.cpp
Normal file
2774
BeefRT/dbg/gc.cpp
Normal file
File diff suppressed because it is too large
Load diff
475
BeefRT/dbg/gc.h
Normal file
475
BeefRT/dbg/gc.h
Normal file
|
@ -0,0 +1,475 @@
|
|||
#pragma once
|
||||
|
||||
#include "BeefySysLib/Common.h"
|
||||
#include "BeefySysLib/util/CritSect.h"
|
||||
#include "BeefySysLib/util/Array.h"
|
||||
#include "BeefySysLib/util/Dictionary.h"
|
||||
#include <unordered_map>
|
||||
#include <map>
|
||||
#include "../rt/BfObjects.h"
|
||||
|
||||
//#include "boost/lockfree/stack.hpp"
|
||||
|
||||
#ifdef BF_PLATFORM_WINDOWS
|
||||
#define BF_GC_SUPPORTED
|
||||
#endif
|
||||
|
||||
#ifdef BF_GC_SUPPORTED
|
||||
|
||||
// Plain-data block exported with C linkage so the debugger can locate GC
// state (roots, sentinel, size-class table) by symbol name in the debuggee.
class GCDbgData
{
public:
	static const int MAX_SIZE_CLASSES = 95;

public:
	BfRtFlags mDbgFlags;                  // mirror of gBfRtFlags (set in Dbg_Init)
	void* mObjRootPtr;                    // root of the object heap
	void* mRawRootPtr;                    // root of the raw heap
	void* mRawObjectSentinel;
	int mSizeClasses[MAX_SIZE_CLASSES];   // allocator size-class table
};
// C linkage gives the symbol an unmangled name for debugger lookup.
extern "C" GCDbgData gGCDbgData;
|
||||
|
||||
class BfDbgInternalThread;
|
||||
|
||||
void* BfRawAllocate(intptr elemCount, bf::System::DbgRawAllocData* rawAllocData, void* stackTraceInfo, int stackTraceCount);
|
||||
void BfRawFree(void* ptr);
|
||||
|
||||
void* BfObjectAllocate(intptr size, bf::System::Type* type);
|
||||
|
||||
void BFDumpAllocStats();
|
||||
|
||||
class BfInternalThread;
|
||||
|
||||
// Incremental/concurrent requires write barriers
|
||||
//#define BF_GC_INCREMENTAL
|
||||
|
||||
//#define BF_GC_LOG_ENABLED
|
||||
#ifdef BF_DEBUG
|
||||
//#define BF_GC_LOG_ENABLED
|
||||
#endif
|
||||
|
||||
#ifdef BF_GC_LOG_ENABLED
|
||||
void BFGCLogWrite();
|
||||
//void BFGCLogAlloc(bf::System::Object* obj, bf::System::Type* objType, int allocNum);
|
||||
#ifdef BF_PLATFORM_WINDOWS
|
||||
#define BF_LOGASSERT(_Expression) (void)( (!!(_Expression)) || (BFGCLogWrite(), 0) || (Beefy::BFFatalError(#_Expression, __FILE__, __LINE__), 0) )
|
||||
#else
|
||||
#define BF_LOGASSERT(_Expression) (void)( (!!(_Expression)) || (BFGCLogWrite(), 0) || (BF_ASSERT(_Expression), 0) )
|
||||
#endif
|
||||
#else
|
||||
#define BF_LOGASSERT(_Expression)
|
||||
#endif
|
||||
|
||||
extern HANDLE gGCHeap;
|
||||
|
||||
// STL-compatible allocator (pre-C++11 interface) that services GC-internal
// containers from a dedicated Win32 heap (gGCHeap), keeping GC bookkeeping
// out of the program heap the GC itself inspects.
// NOTE(review): allocate() does not check for HeapAlloc failure; lazy
// HeapCreate is not thread-safe — presumably first use happens before
// concurrent access; confirm.
template <class T>
class GCAllocStd
{
public:
	typedef size_t size_type;
	typedef ptrdiff_t difference_type;
	typedef T* pointer;
	typedef const T* const_pointer;
	typedef T& reference;
	typedef const T& const_reference;
	typedef T value_type;

	GCAllocStd() {}
	GCAllocStd(const GCAllocStd&) {}

	// Allocates raw storage for n objects from the private GC heap,
	// creating the heap on first use.
	pointer allocate(size_type n, const void * = 0)
	{
		if (gGCHeap == NULL)
			gGCHeap = ::HeapCreate(0, 0, 0);
		return (T*)::HeapAlloc(gGCHeap, 0, n * sizeof(T));
	}

	void deallocate(void* p, size_type)
	{
		::HeapFree(gGCHeap, 0, p);
	}
	pointer address(reference x) const { return &x; }
	const_pointer address(const_reference x) const { return &x; }
	GCAllocStd<T>& operator=(const GCAllocStd&) { return *this; }
	// Placement-constructs/destroys; required by the legacy allocator contract.
	void construct(pointer p, const T& val)
	{
		new ((T*)p) T(val);
	}
	void destroy(pointer p) { p->~T(); }
	size_type max_size() const { return size_t(-1); }

	template <class U>
	struct rebind { typedef GCAllocStd<U> other; };

	template <class U>
	GCAllocStd(const GCAllocStd<U>&) {}

	template <class U>
	GCAllocStd& operator=(const GCAllocStd<U>&) { return *this; }
};
|
||||
|
||||
// Minimal (non-STL) typed allocator over the same private GC heap; used where
// the full allocator interface of GCAllocStd isn't needed.
template <class T>
class GCAlloc
{
public:
	T* allocate(intptr n)
	{
		// Lazily create the dedicated heap on first use.
		if (gGCHeap == NULL)
			gGCHeap = ::HeapCreate(0, 0, 0);
		return (T*)::HeapAlloc(gGCHeap, 0, n * sizeof(T));
	}

	void deallocate(void* p)
	{
		::HeapFree(gGCHeap, 0, p);
	}
};
|
||||
|
||||
// Placeholder for per-thread GC data; currently empty.
class BFIThreadData
{
public:

};
|
||||
|
||||
//#define BF_OBJECTFLAG_FREED 0x10
|
||||
//#define BF_OBJECTFLAG_QUEUE_FULLMARK 0x20
|
||||
|
||||
//If we need an extr aflag, we could use two bits to store Allocated/Stack_Alloc/Append_Alloc since those are all
|
||||
// mutually exclusive
|
||||
// If ALLOCINFO is not set mAllocVal is a single stack trace entry
|
||||
// When ALLOC_INFO_SHORT is set, mDebugData is: [ObjectSize:*][MetadataSize:8][StackCount:8]
|
||||
// if any of those fields is too large then we use ALLOC_INFO, where ObjectSize takes the whole mDebugData,
|
||||
/// then at the end of the object we have: [MetadataSize:*][StackCount:16]
|
||||
#define BF_OBJECTFLAG_MARK_ID_MASK 0x03
|
||||
#define BF_OBJECTFLAG_ALLOCATED 0x04
|
||||
#define BF_OBJECTFLAG_STACK_ALLOC 0x08
|
||||
#define BF_OBJECTFLAG_APPEND_ALLOC 0x10
|
||||
#define BF_OBJECTFLAG_ALLOCINFO 0x20
|
||||
#define BF_OBJECTFLAG_ALLOCINFO_SHORT 0x40
|
||||
#define BF_OBJECTFLAG_DELETED 0x80
|
||||
|
||||
namespace tcmalloc_obj
|
||||
{
|
||||
struct Span;
|
||||
}
|
||||
|
||||
namespace tcmalloc_raw
|
||||
{
|
||||
struct Span;
|
||||
}
|
||||
|
||||
|
||||
// Debug-mode Beef garbage collector. Owns the collector thread, tracks all
// mutator threads for stack scanning, and manages two tcmalloc instances:
// one for Beef objects (tcmalloc_obj) and one for raw debug allocations
// (tcmalloc_raw). Declarations only; implementations live in the gc .cpp files.
class BFGC
{
public:
	// Per-mutator-thread bookkeeping used during suspend/scan.
	struct ThreadInfo
	{
		static BF_TLS_DECLSPEC ThreadInfo* sCurThreadInfo;

		Beefy::CritSect mCritSect;
		BfpThread* mThreadHandle;
		BfpThreadId mThreadId;
		void* mTEB; // Thread environment block pointer -- presumably used to find the stack bounds; confirm in ScanThreads
		intptr mStackStart;
		bool mRunning;
		// Stack-allocated objects registered for marking while this thread runs
		Beefy::Array<bf::System::Object*> mStackMarkableObjects;

		ThreadInfo()
		{
			mThreadId = 0;
			mThreadHandle = NULL;
			mTEB = NULL;
			mStackStart = NULL;
			mRunning = true;
		}

		~ThreadInfo();
	};

	// One leaked raw (non-object) allocation discovered at shutdown/sweep,
	// including where its embedded stack trace lives (see BfRawAllocate layout).
	struct RawLeakInfo
	{
		bf::System::DbgRawAllocData* mRawAllocData;
		void* mDataPtr;
		void* mStackTracePtr;
		int mStackTraceCount;
		int mDataCount;
	};

	// Timing/statistics snapshot for one completed collection cycle.
	struct CollectReport
	{
		int mCollectIdx;
		uint32 mStartTick;
		int mTotalMS;
		int mPausedMS;
		int mCollectCount;
	};

	// Accumulated results of a sweep pass: leaked objects and raw allocations.
	struct SweepInfo
	{
		Beefy::Array<bf::System::Object* /*, GCAlloc<System::Object*> */> mLeakObjects;
		Beefy::Array<RawLeakInfo> mRawLeaks;
		int mLeakCount;
		bool mShowAllAsLeaks;
		bool mEmptyScan;

		SweepInfo()
		{
			mLeakCount = 0;
			mShowAllAsLeaks = false;
			mEmptyScan = false;
		}

		void Clear()
		{
			mLeakCount = 0;
			mShowAllAsLeaks = false;
			mLeakObjects.Clear();
			mRawLeaks.Clear();
		}
	};

	// A registered thread-local root: a TLS slot plus the function that marks it.
	struct TLSMember
	{
		intptr mTLSOffset;
		void* mMarkFunc;
		int mTLSIndex;
	};

	// Per-type allocation statistics used by the heap reports.
	struct AllocInfo
	{
		int mCount;
		int mSize;

		// Reversed comparison so multimap<AllocInfo,...> orders largest-first.
		bool operator<(const AllocInfo &rhs) const
		{
			return mSize > rhs.mSize;
		}
	};

	// Handshake states for the debug-dump protocol between mutators and the GC.
	enum
	{
		DEBUGDUMPSTATE_NONE,
		DEBUGDUMPSTATE_WAITING_FOR_PREV,
		DEBUGDUMPSTATE_WAITING_FOR_PREV_2,
		DEBUGDUMPSTATE_WAITING_FOR_MUTATOR,
		DEBUGDUMPSTATE_WAITING_FOR_GC
	};

	Beefy::CritSect mCritSect;
	Beefy::SyncEvent mCollectEvent;      // Signaled to wake the GC thread for a cycle
	Beefy::SyncEvent mCollectDoneEvent;  // Signaled when a cycle completes (for synchronous Collect)

	BfpThread* mGCThread;
	//BfObject* mEphemeronTombstone;

	BfpThreadId mThreadId; // Id of the GC thread itself

	volatile bool mExiting;
	volatile bool mRunning;
	bool mPaused;
	bool mShutdown;
	bool mWaitingForGC; // GC.Collect sets this
	int mAllocSinceLastGC; // Added to on alloc and subtracted from on nursery cleanup
	int mFreeSinceLastGC;

	bool mFullGCTriggered;
	bool mForceDecommit;
	bool mSkipMark;
	volatile bool mCollectRequested;
	volatile bool mPerformingCollection;
	volatile bool mUsingThreadUnlocked;
	volatile int mDebugDumpState; // One of the DEBUGDUMPSTATE_* values above
	Beefy::Array<bf::System::Object*> mExplicitRoots;
	Beefy::Array<TLSMember> mTLSMembers;
	Beefy::Array<CollectReport> mCollectReports;
	int mCollectIdx;
	void* mMainThreadTLSPtr;

	int mMultiStackScanWait; // To avoid multiple stack scans per frame
	int mFullGCPeriod; // Maximum milliseconds between GC cycles
	int mFreeTrigger; // Bytes before a full GC is triggered
	int mMaxPausePercentage; // Maximum percentage we're allowed to stop threads
	int mMaxRawDeferredObjectFreePercentage; // Maximum percentage of heap usage to defer raw object

	int mStackScanIdx;
	bool mDoStackDeepMark;
	int mStage;
	int mLastCollectFrame;
	int mCurMarkId;
	static int volatile sCurMarkId; //0-3
	static int volatile sAllocFlags;

	Beefy::Array<bf::System::Object*> mPendingGCData;
	Beefy::Array<ThreadInfo*> mThreadList;
	// Per-cycle statistics, reset each collection:
	int mCurMutatorMarkCount;
	int mCurGCMarkCount;
	int mCurGCObjectQueuedCount;
	int mCurMutatorObjectQueuedCount;
	int mCurObjectDeleteCount;
	int mCurFinalizersCalled;
	int mCurSweepFoundCount;
	int mCurSweepFoundPermanentCount;
	int mCurFreedBytes;
	int mCurLiveObjectCount;

	// Lifetime totals:
	int mCurScanIdx;
	int mTotalAllocs;
	int mTotalFrees;
	uint64 mBytesFreed;
	size_t mBytesRequested; // Consistent but race-susceptible. Fixup once per cycle from TLS data
	bool mRequestedSizesInvalid; // Can occur if reflection data is trimmed -- only matters for debug reporting anyway
	int mMarkDepthCount;
	bool mMarkingDeleted;
	bool mQueueMarkObjects;
	int mLastFreeCount;
	Beefy::Array<bf::System::Object* /*, GCAlloc<System::Object*>*/ > mFinalizeList;
	SweepInfo mSweepInfo;
	bool mDisplayFreedObjects;
	bool mHadRootError;

public:
	// --- Internal machinery (called from the GC thread / runtime glue) ---
	void RawInit();
	void RawShutdown();
	void WriteDebugDumpState();
	bool HandlePendingGCData(Beefy::Array<bf::System::Object*>* pendingGCData);
	bool HandlePendingGCData();

	void MarkMembers(bf::System::Object* obj);
	void AdjustStackPtr(intptr& addr, int& size);
	bool ScanThreads();
	void ReportLeak(bf::System::Object* obj);
	void SweepSpan(tcmalloc_obj::Span* span, int expectedStartPage);
	void Sweep();
	void RawMarkSpan(tcmalloc_raw::Span* span, int expectedStartPage);
	void RawMarkAll();
	void ProcessSweepInfo();
	void ReleasePendingSpanObjects(tcmalloc_obj::Span* span);
	void ReleasePendingObjects();
	void ConservativeScan(void* addr, int length);
	bool IsHeapObject(bf::System::Object* obj);
	void MarkStatics();
	void ObjectDeleteRequested(bf::System::Object* obj);

	void DoCollect(bool doingFullGC);
	void FinishCollect();
	void Run(); // GC thread main loop

	static void BFP_CALLTYPE RunStub(void* gc);

	void DumpLeaksSpan(tcmalloc_obj::Span* span, int expectedStartPage, Beefy::StringImpl& msg);

public:
	// --- Public control surface (mirrored by bf::System::GC) ---
	BFGC();
	~BFGC();

	void Init();
	void Start();
	void StopCollecting();
	void AddStackMarkableObject(bf::System::Object* obj);
	void RemoveStackMarkableObject(bf::System::Object* obj);
	void Shutdown();
	void InitDebugDump();
	void EndDebugDump();
	void SuspendThreads();
	void ResumeThreads();
	void PerformCollection();
	void Collect(bool async);
	void DebugDumpLeaks();

	// Heap reporting (object heap and raw heap variants)
	void ObjReportHandleSpan(tcmalloc_obj::Span* span, int expectedStartPage, int& objectCount, intptr& freeSize, Beefy::Dictionary<bf::System::Type*, AllocInfo>& sizeMap);
	void ObjReportScan(int& objectCount, intptr& freeSize, Beefy::Dictionary<bf::System::Type*, AllocInfo>& sizeMap);
	void RawReportHandleSpan(tcmalloc_raw::Span* span, int expectedStartPage, int& objectCount, intptr& freeSize, Beefy::Dictionary<bf::System::Type*, AllocInfo>* sizeMap);
	void RawReportScan(int& objectCount, intptr& freeSize, Beefy::Dictionary<bf::System::Type*, AllocInfo>* sizeMap);
	void Report();
	void RawReport(Beefy::String& msg, intptr& freeSize, std::multimap<AllocInfo, bf::System::Type*>& orderedSizeMap);
	void ReportTLSMember(int tlsIndex, void* ptr, void* markFunc);

	//void RegisterRoot(BfObject* obj);

	void ThreadStarted(BfDbgInternalThread* thread);
	void ThreadStopped(BfDbgInternalThread* thread);

	void ThreadStarted();
	void ThreadStopped();

	void MarkFromGCThread(bf::System::Object* obj); // Can only called from within GC thread

	// Tuning knobs (see the matching m* members above)
	void SetAutoCollectPeriod(int periodMS);
	void SetCollectFreeThreshold(int freeBytes);
	void SetMaxPausePercentage(int maxPausePercentage);
	void SetMaxRawDeferredObjectFreePercentage(intptr maxPercentage);
};
|
||||
|
||||
extern BFGC gBFGC;
|
||||
|
||||
#define BF_OBJALLOC_NO_ALLOCDONE(klass) (bfNewObject = new (BfObjectAllocate(sizeof(klass), klass::sBFTypeID)) klass(), bfNewObject->mBFVData = &klass::sClassVData, bfNewObject)
|
||||
#define BF_OBJALLOC(klass) (bfNewObject = new (BfObjectAllocate(sizeof(klass), klass::sBFTypeID)) klass(), bfNewObject->mBFVData = &klass::sClassVData, bfNewObject->BFAllocDone(0), bfNewObject)
|
||||
//#define BF_OBJALLOC(klass) (bfNewObject = new (BfObjectAllocate(sizeof(klass), klass::sBFTypeID)) klass(), bfNewObject->mBFVData = &klass::sClassVData, bfNewObject->BFAllocDone(0), BFCheckObjectSize(bfNewObject, sizeof(klass)), bfNewObject)
|
||||
#define BF_OBJALLOC_PERMANENT(klass) (bfNewObject = new (BfObjectAllocate(sizeof(klass), klass::sBFTypeID)) klass(), bfNewObject->mBFVData = &klass::sClassVData, bfNewObject->BFAllocDone(BF_OBJECTFLAG_PERMANENT), bfNewObject)
|
||||
|
||||
#else //BF_GC_SUPPORTED
|
||||
|
||||
// Stub GC for targets where garbage collection is not supported; provides
// just enough surface (sAllocFlags) for shared runtime code to compile.
class BFGC
{
public:
	static const int sAllocFlags = 0;
};
|
||||
|
||||
void* BfObjectAllocate(intptr size, bf::System::Type* type);
|
||||
void* BfRawAllocate(intptr elemCount, bf::System::DbgRawAllocData* rawAllocData, void* stackTraceInfo, int stackTraceCount);
|
||||
void BfRawFree(void* ptr);
|
||||
|
||||
#endif
|
||||
|
||||
namespace bf
{
	namespace System
	{
		class Object;
		namespace Threading
		{
			class Thread;
		}

		// Static facade the Beef-side 'System.GC' class binds to. Methods
		// marked BFRT_EXPORT are implemented in the runtime; the inline
		// helpers forward into the compiled program through gBfRtCallbacks.
		class GC : public Object
		{
		private:
			BFRT_EXPORT static void Init();
			BFRT_EXPORT static void Run();
			// Forwards to compiled-in static-member marking code
			static void MarkAllStaticMembers()
			{
				gBfRtCallbacks.GC_MarkAllStaticMembers();
			}
			// Forwards to compiled-in user root callbacks
			static bool CallRootCallbacks()
			{
				return gBfRtCallbacks.GC_CallRootCallbacks();
			}
			BFRT_EXPORT static void ReportTLSMember(intptr tlsIndex, void* ptr, void* markFunc);
			BFRT_EXPORT static void StopCollecting();
			BFRT_EXPORT static void AddStackMarkableObject(Object* obj);
			BFRT_EXPORT static void RemoveStackMarkableObject(Object* obj);

		public:
			BFRT_EXPORT static void Shutdown();
			BFRT_EXPORT static void Collect(bool async);
			BFRT_EXPORT static void Report();
			BFRT_EXPORT static void Mark(Object* obj);
			BFRT_EXPORT static void Mark(void* ptr, intptr size);
			BFRT_EXPORT static void DebugDumpLeaks();
			//static void ToLeakString(Object* obj, String* strBuffer);
			// Public trampolines so runtime code outside this class can reach
			// the private callback forwarders above.
			static void DoMarkAllStaticMembers()
			{
				MarkAllStaticMembers();
			}
			static bool DoCallRootCallbacks()
			{
				return CallRootCallbacks();
			}
			BFRT_EXPORT static void SetAutoCollectPeriod(intptr periodMS);
			BFRT_EXPORT static void SetCollectFreeThreshold(intptr freeBytes);
			BFRT_EXPORT static void SetMaxPausePercentage(intptr maxPausePercentage);
			BFRT_EXPORT static void SetMaxRawDeferredObjectFreePercentage(intptr maxPercentage);
		};
	}
}
|
638
BeefRT/dbg/gc_raw.cpp
Normal file
638
BeefRT/dbg/gc_raw.cpp
Normal file
|
@ -0,0 +1,638 @@
|
|||
|
||||
#define _WIN32_WINNT _WIN32_WINNT_WIN8
|
||||
|
||||
// This spits out interesting stats periodically to the console
|
||||
#define BF_GC_PRINTSTATS
|
||||
|
||||
// Disable efficient new mass-freeing in TCMalloc
|
||||
//#define BF_GC_USE_OLD_FREE
|
||||
|
||||
#ifdef BF_DEBUG
|
||||
// Useful for tracing down memory corruption -- memory isn't returned to TCMalloc, it's just marked as freed. You'll run out eventually
|
||||
//#define BF_NO_FREE_MEMORY
|
||||
// Old and not too useful
|
||||
//#define BG_GC_TRACKPTRS
|
||||
#endif
|
||||
|
||||
#if defined BF_OBJECT_TRACK_ALLOCNUM && defined BF_NO_FREE_MEMORY
|
||||
//#define BF_GC_VERIFY_SWEEP_IDS
|
||||
#endif
|
||||
|
||||
//#define BF_SECTION_NURSERY
|
||||
|
||||
#ifdef BF_PLATFORM_WINDOWS
|
||||
#include <direct.h>
|
||||
#endif
|
||||
|
||||
#include "gc.h"
|
||||
|
||||
// Sentinel alloc-data for raw allocations carrying no debug metadata.
bf::System::DbgRawAllocData sEmptyAllocData = { 0 };
// Sentinel alloc-data marking raw allocations that actually hold Beef objects;
// BfRawFree treats these specially (deferred free), and the debugger matches
// against its address (see gGCDbgData.mRawObjectSentinel in RawInit).
bf::System::DbgRawAllocData sObjectAllocData = { 0 };
|
||||
|
||||
|
||||
#ifdef BF_GC_SUPPORTED
|
||||
|
||||
#include <fstream>
|
||||
#include "BeefySysLib/Common.h"
|
||||
#include "BeefySysLib/BFApp.h"
|
||||
#include "BeefySysLib/util/CritSect.h"
|
||||
#include "BeefySysLib/util/BeefPerf.h"
|
||||
#include "BeefySysLib/util/HashSet.h"
|
||||
#include "BeefySysLib/util/Dictionary.h"
|
||||
#include "BeefySysLib/util/Deque.h"
|
||||
#include <unordered_set>
|
||||
#include "../rt/BfObjects.h"
|
||||
#include "../rt/Thread.h"
|
||||
#include <map>
|
||||
|
||||
#define TCMALLOC_NO_MALLOCGUARD
|
||||
#define TCMALLOC_NAMESPACE tcmalloc_raw
|
||||
#define TCMALLOC_EXTERN static
|
||||
#include "gperftools/src/tcmalloc.cc"
|
||||
|
||||
using namespace tcmalloc_raw;
|
||||
using namespace Beefy;
|
||||
|
||||
struct DeferredFreeEntry
|
||||
{
|
||||
bf::System::Object* mObject;
|
||||
int mAllocSize;
|
||||
};
|
||||
|
||||
static Beefy::Deque<DeferredFreeEntry> gDeferredFrees;
|
||||
static int gRawAllocSize = 0;
|
||||
static int gMaxRawAllocSize = 0;
|
||||
static int gDeferredObjectFreeSize = 0;
|
||||
|
||||
// Initializes the raw-allocation tcmalloc instance (module + thread-specific
// data) and publishes the pagemap root and the object sentinel address into
// the debugger-visible GC data block.
void BFGC::RawInit()
{
	// Lazy module init: pageheap() is NULL until InitModule has run
	if (UNLIKELY(Static::pageheap() == NULL)) ThreadCache::InitModule();
	ThreadCache::InitTSD();
	gGCDbgData.mRawRootPtr = Static::pageheap()->pagemap_.root_;
	gGCDbgData.mRawObjectSentinel = &sObjectAllocData;
}
|
||||
|
||||
// Marks every live element inside one raw-heap span by invoking the per-type
// mark function stored in each element's trailing DbgRawAllocData.
// Element layout (see BfRawAllocate): [data][opt stack trace][opt count][DbgRawAllocData*].
// Only runs when objects carry debug flags; silently skips stale/partial spans.
void BFGC::RawMarkSpan(tcmalloc_raw::Span* span, int expectedStartPage)
{
	if ((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) == 0)
		return;

	if (span->location != tcmalloc_raw::Span::IN_USE)
		return;

	if (span->start != expectedStartPage)
	{
		// This check covers when a new multi-page span is being put into place
		// and we catch after the first block, and it also catches the case
		// when the allocator splits a span and the pagemap can hold a reference
		// to a span that no longer covers that location.
		// For both of these cases we ignore the span. Remember, the worst case
		// here is that we'll miss a sweep of an object, which would just delay it's
		// cleanup until next GC cycle. Because the GC is the sole freer of spans,
		// there can never be a case where we find a valid span and then the span
		// changes sizeclass or location before we can scan the memory it points to.
		//
		// This also covers the case where a page spans over a radix map section and
		// we catch it on an outer loop again
		return;
	}

	intptr pageSize = (intptr)1 << kPageShift;
	// NOTE(review): spanSize is an 'int'; a span longer than 2GB would overflow --
	// presumably spans never get that large, but intptr would be safer. Confirm.
	int spanSize = pageSize * span->length;
	void* spanStart = (void*)((intptr)span->start << kPageShift);
	void* spanEnd = (void*)((intptr)spanStart + spanSize);
	void* spanPtr = spanStart;

	BF_LOGASSERT((spanStart >= tcmalloc_raw::PageHeap::sAddressStart) && (spanEnd <= tcmalloc_raw::PageHeap::sAddressEnd));

	// sizeclass 0 means a large (whole-span) allocation
	int elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
	if (elementSize == 0)
		elementSize = spanSize;
	BF_LOGASSERT(elementSize >= sizeof(bf::System::Object));

	// Walk each fixed-size element slot in the span
	while (spanPtr <= (uint8*)spanEnd - elementSize)
	{
		// The last pointer-sized word of each element is its DbgRawAllocData*;
		// NULL means the slot is free.
		bf::System::DbgRawAllocData* rawAllocData = *(bf::System::DbgRawAllocData**)((uint8*)spanPtr + elementSize - sizeof(intptr));
		if (rawAllocData != NULL)
		{
			if (rawAllocData->mMarkFunc != NULL)
			{
				// Compute how much of the element tail is metadata rather than user data
				int extraDataSize = sizeof(intptr);
				if (rawAllocData->mMaxStackTrace == 1)
				{
					extraDataSize += sizeof(intptr);
				}
				else if (rawAllocData->mMaxStackTrace > 1)
				{
					int stackTraceCount = *(intptr*)((uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr));
					extraDataSize += (1 + stackTraceCount) * sizeof(intptr);
				}

				typedef void(*MarkFunc)(void*);
				MarkFunc markFunc = *(MarkFunc*)&rawAllocData->mMarkFunc;

				// It's possible we can overestimate elemCount, particularly for large allocations. This doesn't cause a problem
				// because we can safely mark on complete random memory -- pointer values are always validated before being followed
				int elemStride = BF_ALIGN(rawAllocData->mType->mSize, rawAllocData->mType->mAlign);
				int dataSize = elementSize - extraDataSize;
				int elemCount = dataSize / elemStride;
				for (int elemIdx = 0; elemIdx < elemCount; elemIdx++)
				{
					markFunc((uint8*)spanPtr + elemIdx * elemStride);
				}
			}
		}

		spanPtr = (void*)((intptr)spanPtr + elementSize);
	}
}
|
||||
|
||||
// Walks the entire raw-heap pagemap (2-level radix on 32-bit, 3-level on
// 64-bit) and calls RawMarkSpan on every span found. Every leaf is visited;
// see the in-loop comment for why we never skip ahead by span length.
void BFGC::RawMarkAll()
{
	//BP_ZONE("Sweep");

	mCurLiveObjectCount = 0;

#ifdef BF_GC_VERIFY_SWEEP_IDS
	maxAllocNum = bf::System::Object::sCurAllocNum;
	allocIdSet.clear();
#endif

	auto pageHeap = Static::pageheap();
	if (pageHeap == NULL)
		return;

	int leafCheckCount = 0; // diagnostic counter only

#ifdef BF32
	for (int rootIdx = 0; rootIdx < PageHeap::PageMap::ROOT_LENGTH; rootIdx++)
	{
		PageHeap::PageMap::Leaf* rootLeaf = Static::pageheap()->pagemap_.root_[rootIdx];
		if (rootLeaf == NULL)
			continue;

		for (int leafIdx = 0; leafIdx < PageHeap::PageMap::LEAF_LENGTH; leafIdx++)
		{
			leafCheckCount++;

			tcmalloc_raw::Span* span = (tcmalloc_raw::Span*)rootLeaf->values[leafIdx];
			if (span != NULL)
			{
				// Leaf index encodes the page number; RawMarkSpan validates it
				int expectedStartPage = (rootIdx * PageHeap::PageMap::LEAF_LENGTH) + leafIdx;
				RawMarkSpan(span, expectedStartPage);
				// We may be tempted to advance by span->length here, BUT
				// let us just scan all leafs because span data is
				// sometimes invalid and a long invalid span can cause
				// us to skip over an actual valid span
			}
		}
	}
#else
	for (int pageIdx1 = 0; pageIdx1 < PageHeap::PageMap::INTERIOR_LENGTH; pageIdx1++)
	{
		PageHeap::PageMap::Node* node1 = pageHeap->pagemap_.root_->ptrs[pageIdx1];
		if (node1 == NULL)
			continue;
		for (int pageIdx2 = 0; pageIdx2 < PageHeap::PageMap::INTERIOR_LENGTH; pageIdx2++)
		{
			PageHeap::PageMap::Node* node2 = node1->ptrs[pageIdx2];
			if (node2 == NULL)
				continue;
			for (int pageIdx3 = 0; pageIdx3 < PageHeap::PageMap::LEAF_LENGTH; pageIdx3++)
			{
				leafCheckCount++;

				tcmalloc_raw::Span* span = (tcmalloc_raw::Span*)node2->ptrs[pageIdx3];
				if (span != NULL)
				{
					int expectedStartPage = ((pageIdx1 * PageHeap::PageMap::INTERIOR_LENGTH) + pageIdx2) * PageHeap::PageMap::LEAF_LENGTH + pageIdx3;
					RawMarkSpan(span, expectedStartPage);
					// We may be tempted to advance by span->length here, BUT
					// let us just scan all leafs because span data is
					// sometimes invalid and a long invalid span can cause
					// us to skip over an actual valid span
				}
			}
		}
	}
#endif
}
|
||||
|
||||
// Inspects one raw-heap span for reporting purposes. Two modes:
//  - sizeMap != NULL: accumulate per-type live counts/sizes into sizeMap.
//  - sizeMap == NULL: treat every live element as a leak and record it
//    (with its embedded stack trace) into mSweepInfo (shutdown path).
// Also tallies live element count into objectCount and free bytes into freeSize.
void BFGC::RawReportHandleSpan(tcmalloc_raw::Span* span, int expectedStartPage, int& objectCount, intptr& freeSize, Beefy::Dictionary<bf::System::Type*, AllocInfo>* sizeMap)
{
	if (span->location != tcmalloc_raw::Span::IN_USE)
		return;

	if (span->start != expectedStartPage)
	{
		// Stale/split span in the pagemap; skip (see RawMarkSpan for rationale)
		return;
	}

	intptr pageSize = (intptr)1 << kPageShift;
	int spanSize = pageSize * span->length;
	void* spanStart = (void*)((intptr)span->start << kPageShift);
	void* spanEnd = (void*)((intptr)spanStart + spanSize);
	void* spanPtr = spanStart;

	BF_LOGASSERT((spanStart >= tcmalloc_raw::PageHeap::sAddressStart) && (spanEnd <= tcmalloc_raw::PageHeap::sAddressEnd));

	// sizeclass 0 means a large (whole-span) allocation
	int elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
	if (elementSize == 0)
		elementSize = spanSize;

	while (spanPtr <= (uint8*)spanEnd - elementSize)
	{
		// Trailing pointer-sized word holds the DbgRawAllocData*; NULL == free slot
		bf::System::DbgRawAllocData* rawAllocData = *(bf::System::DbgRawAllocData**)((uint8*)spanPtr + elementSize - sizeof(intptr));
		if (rawAllocData != NULL)
		{
			bf::System::Type* type = rawAllocData->mType;

			AllocInfo* sizePtr = NULL;

			if (sizeMap == NULL)
			{
				// Leak-reporting mode: reconstruct the element's metadata layout
				int extraDataSize = sizeof(intptr);

				RawLeakInfo rawLeakInfo;
				rawLeakInfo.mRawAllocData = rawAllocData;
				rawLeakInfo.mDataPtr = spanPtr;

				if (rawAllocData->mMaxStackTrace == 1)
				{
					// Single return address stored directly before the alloc-data word
					extraDataSize += sizeof(intptr);
					rawLeakInfo.mStackTraceCount = 1;
					rawLeakInfo.mStackTracePtr = (uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr);
				}
				else if (rawAllocData->mMaxStackTrace > 1)
				{
					// [trace entries...][count][alloc-data], reading backwards from the end
					rawLeakInfo.mStackTraceCount = *(intptr*)((uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr));
					rawLeakInfo.mStackTracePtr = (uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr) - rawLeakInfo.mStackTraceCount * sizeof(intptr);
					extraDataSize += (1 + rawLeakInfo.mStackTraceCount) * sizeof(intptr);
				}
				else
				{
					rawLeakInfo.mStackTraceCount = 0;
					rawLeakInfo.mStackTracePtr = NULL;
				}

				if (rawAllocData->mType != NULL)
				{
					// Type layout differs depending on whether objects carry debug flags
					int typeSize;
					if ((gBfRtFlags & BfRtFlags_ObjectHasDebugFlags) != 0)
						typeSize = rawAllocData->mType->mSize;
					else
						typeSize = ((bf::System::Type_NOFLAGS*)rawAllocData->mType)->mSize;
					rawLeakInfo.mDataCount = (elementSize - extraDataSize) / typeSize;
				}
				else
					rawLeakInfo.mDataCount = 1;

				mSweepInfo.mRawLeaks.Add(rawLeakInfo);
				mSweepInfo.mLeakCount++;
			}
			else
			{
				// Statistics mode: bucket by type
				(*sizeMap).TryAdd(type, NULL, &sizePtr);
				sizePtr->mSize += elementSize;
				sizePtr->mCount++;
			}
			objectCount++;
		}
		else
		{
			freeSize += elementSize;
		}

		// Do other stuff
		spanPtr = (void*)((intptr)spanPtr + elementSize);
	}
}
|
||||
|
||||
// Walks the raw-heap pagemap (same traversal as RawMarkAll) and feeds every
// span to RawReportHandleSpan. sizeMap == NULL selects leak-recording mode.
void BFGC::RawReportScan(int& objectCount, intptr& freeSize, Beefy::Dictionary<bf::System::Type*, AllocInfo>* sizeMap)
{
	auto pageHeap = Static::pageheap();
	if (pageHeap == NULL)
		return;

#ifdef BF32
	for (int rootIdx = 0; rootIdx < PageHeap::PageMap::ROOT_LENGTH; rootIdx++)
	{
		PageHeap::PageMap::Leaf* rootLeaf = Static::pageheap()->pagemap_.root_[rootIdx];
		if (rootLeaf == NULL)
			continue;

		for (int leafIdx = 0; leafIdx < PageHeap::PageMap::LEAF_LENGTH; leafIdx++)
		{
			tcmalloc_raw::Span* span = (tcmalloc_raw::Span*)rootLeaf->values[leafIdx];
			if (span != NULL)
			{
				int expectedStartPage = (rootIdx * PageHeap::PageMap::LEAF_LENGTH) + leafIdx;
				RawReportHandleSpan(span, expectedStartPage, objectCount, freeSize, sizeMap);
				// We may be tempted to advance by span->length here, BUT
				// let us just scan all leafs because span data is
				// sometimes invalid and a long invalid span can cause
				// us to skip over an actual valid span
			}
		}
	}
#else
	for (int pageIdx1 = 0; pageIdx1 < PageHeap::PageMap::INTERIOR_LENGTH; pageIdx1++)
	{
		PageHeap::PageMap::Node* node1 = Static::pageheap()->pagemap_.root_->ptrs[pageIdx1];
		if (node1 == NULL)
			continue;
		for (int pageIdx2 = 0; pageIdx2 < PageHeap::PageMap::INTERIOR_LENGTH; pageIdx2++)
		{
			PageHeap::PageMap::Node* node2 = node1->ptrs[pageIdx2];
			if (node2 == NULL)
				continue;
			for (int pageIdx3 = 0; pageIdx3 < PageHeap::PageMap::LEAF_LENGTH; pageIdx3++)
			{
				tcmalloc_raw::Span* span = (tcmalloc_raw::Span*)node2->ptrs[pageIdx3];
				if (span != NULL)
				{
					int expectedStartPage = ((pageIdx1 * PageHeap::PageMap::INTERIOR_LENGTH) + pageIdx2) * PageHeap::PageMap::LEAF_LENGTH + pageIdx3;
					RawReportHandleSpan(span, expectedStartPage, objectCount, freeSize, sizeMap);
					// We may be tempted to advance by span->length here, BUT
					// let us just scan all leafs because span data is
					// sometimes invalid and a long invalid span can cause
					// us to skip over an actual valid span
				}
			}
		}
	}
#endif
}
|
||||
|
||||
// Produces a raw-heap usage report: scans the whole raw heap, buckets live
// allocations by type, then flips the per-type stats into orderedSizeMap
// (largest-first, via AllocInfo::operator<) and appends a summary line to msg.
//   msg            - report text appended to
//   freeSize       - accumulates free bytes found in scanned spans
//   orderedSizeMap - receives {stats -> type} entries ordered by size
void BFGC::RawReport(String& msg, intptr& freeSize, std::multimap<AllocInfo, bf::System::Type*>& orderedSizeMap)
{
	BP_ZONE("RawReport");

	int objectCount = 0;

#ifdef BF_GC_VERIFY_SWEEP_IDS
	maxAllocNum = bf::System::Object::sCurAllocNum;
	allocIdSet.clear();
#endif

	Beefy::Dictionary<bf::System::Type*, AllocInfo> sizeMap;

	RawReportScan(objectCount, freeSize, &sizeMap);
	// Invert to (AllocInfo -> Type) so the multimap orders entries by size
	for (auto& pair : sizeMap)
	{
		orderedSizeMap.insert(std::make_pair(pair.mValue, pair.mKey));
	}

	msg += Beefy::StrFormat(" Live Non-Objects %d\n", objectCount);
}
|
||||
|
||||
extern Beefy::StringT<0> gDbgErrorString;
|
||||
|
||||
// Shutdown-time leak check for the raw heap. Flushes all deferred object
// frees, rescans the heap in leak-recording mode, and if leaks remain builds
// a debugger-consumable report (the "\x1"-prefixed lines encode clickable
// memory references for the IDE) and breaks into the debugger.
void BFGC::RawShutdown()
{
	BF_ASSERT(!mRunning);

	// Release everything still parked on the deferred-free queue
	while (!gDeferredFrees.IsEmpty())
	{
		DeferredFreeEntry entry = gDeferredFrees.PopBack();
		gDeferredObjectFreeSize -= entry.mAllocSize;
		tc_free(entry.mObject);
	}
	BF_ASSERT(gDeferredObjectFreeSize == 0);

	int objectCount = 0;
	intptr freeSize = 0;
	mSweepInfo.mLeakCount = 0;
	// NULL sizeMap == leak-recording mode (fills mSweepInfo)
	RawReportScan(objectCount, freeSize, NULL);

	if (mSweepInfo.mLeakCount > 0)
	{
		Beefy::String errorStr = StrFormat("%d raw memory leak%s detected, details in Output panel.",
			mSweepInfo.mLeakCount, (mSweepInfo.mLeakCount != 1) ? "s" : "");
		gDbgErrorString = errorStr;
		gDbgErrorString += "\n";

		for (auto& rawLeak : mSweepInfo.mRawLeaks)
		{
			// Present the leak as a typed pointer expression the IDE can evaluate
			Beefy::String typeName;
			if (rawLeak.mRawAllocData->mType != NULL)
				typeName = rawLeak.mRawAllocData->mType->GetFullName() + "*";
			else if (rawLeak.mRawAllocData == &sObjectAllocData)
				typeName = "System.Object";
			else
				typeName = "uint8*";
			errorStr += "\x1";
			String leakStr = StrFormat("(%s)0x%@", typeName.c_str(), rawLeak.mDataPtr);
			if (rawLeak.mDataCount > 1)
				leakStr += StrFormat(",%d", rawLeak.mDataCount);

			errorStr += StrFormat("LEAK\t%s\n", leakStr.c_str());
			errorStr += StrFormat("   %s\n", leakStr.c_str());

			// Include the recorded allocation stack trace, if any
			if (rawLeak.mStackTraceCount > 0)
			{
				errorStr += "\x1";
				errorStr += StrFormat("LEAK\t(System.CallStackAddr*)0x%@", rawLeak.mStackTracePtr);
				if (rawLeak.mStackTraceCount == 1)
					errorStr += StrFormat(", nm");
				else
					errorStr += StrFormat(", %d, na", rawLeak.mStackTraceCount);
				errorStr += StrFormat("\n    [AllocStackTrace]\n");
			}

			// Cap the short error string so the dialog stays readable
			if (gDbgErrorString.length() < 256)
				gDbgErrorString += StrFormat("   %s\n", leakStr.c_str());
		}

		BF_ASSERT(mSweepInfo.mLeakCount > 0);

		gBfRtCallbacks.SetErrorString(gDbgErrorString.c_str());
		gBfRtCallbacks.DebugMessageData_SetupError(errorStr.c_str(), 0);
		BF_DEBUG_BREAK();
	}
	else
	{
		// No leaks: every raw allocation must have been freed
		BF_ASSERT(gRawAllocSize == 0);
	}
}
|
||||
|
||||
// Forked tcmalloc large-allocation path (page-granularity). Differences from
// stock do_malloc_pages: tracks the caller's requested bytes on the thread
// cache and returns the rounded-up allocation size through the by-ref 'size'
// so the caller can place trailing debug metadata at the true end.
inline void* BF_do_malloc_pages(ThreadCache* heap, size_t& size)
{
	void* result;
	bool report_large;

	heap->requested_bytes_ += size;
	Length num_pages = TCMALLOC_NAMESPACE::pages(size);
	size = num_pages << kPageShift; // report actual (page-rounded) size back

	if ((TCMALLOC_NAMESPACE::FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
		result = DoSampledAllocation(size);

		SpinLockHolder h(Static::pageheap_lock());
		report_large = should_report_large(num_pages);
	}
	else {
		SpinLockHolder h(Static::pageheap_lock());
		Span* span = Static::pageheap()->New(num_pages);
		result = (UNLIKELY(span == NULL) ? NULL : SpanToMallocResult(span));
		report_large = should_report_large(num_pages);
	}

	if (report_large) {
		ReportLargeAlloc(num_pages, result);
	}

	return result;
}
|
||||
|
||||
// Forked tcmalloc small-allocation path. Like BF_do_malloc_pages, it tracks
// requested bytes and writes the size-class-rounded size back through 'size'
// so trailing debug metadata lands at the true end of the block.
inline void* BF_do_malloc_small(ThreadCache* heap, size_t& size)
{
	if (size == 0)
		size = (int)sizeof(void*); // tcmalloc has no zero-byte class; round up

	ASSERT(Static::IsInited());
	ASSERT(heap != NULL);
	heap->requested_bytes_ += size;
	size_t cl = Static::sizemap()->SizeClass(size);
	size = Static::sizemap()->class_to_size(cl); // report actual class size back

	void* result;
	if ((TCMALLOC_NAMESPACE::FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
		result = DoSampledAllocation(size);
	}
	else {
		// The common case, and also the simplest. This just pops the
		// size-appropriate freelist, after replenishing it if it's empty.
		result = CheckedMallocResult(heap->Allocate(size, cl));
	}

	return result;
}
|
||||
|
||||
// Allocates a raw (non-object) debug-tracked block from the raw tcmalloc heap.
// Element layout, back to front:
//   [user data][optional stack trace entries][optional trace count][DbgRawAllocData*]
// The trailing DbgRawAllocData* doubles as the "slot live" flag read by the
// sweep/report code (NULL == free). The BF_do_malloc_* helpers round totalSize
// up, so the metadata is written relative to the final (rounded) size.
//   size            - user-data bytes requested
//   rawAllocData    - per-type debug descriptor (mMaxStackTrace selects layout)
//   stackTraceInfo  - capture of stackTraceCount return addresses (intptr each)
//   stackTraceCount - number of entries at stackTraceInfo
void* BfRawAllocate(intptr size, bf::System::DbgRawAllocData* rawAllocData, void* stackTraceInfo, int stackTraceCount)
{
	size_t totalSize = size;
	totalSize += sizeof(intptr); // trailing DbgRawAllocData*
	if (rawAllocData->mMaxStackTrace == 1)
		totalSize += sizeof(intptr); // single return address, no count word
	else if (rawAllocData->mMaxStackTrace > 1)
		totalSize += (1 + stackTraceCount) * sizeof(intptr); // trace + count word

	void* result;
	if (ThreadCache::have_tls &&
		LIKELY(totalSize < ThreadCache::MinSizeForSlowPath()))
	{
		result = BF_do_malloc_small(ThreadCache::GetCacheWhichMustBePresent(), totalSize);
	}
	else if (totalSize <= kMaxSize)
	{
		result = BF_do_malloc_small(ThreadCache::GetCache(), totalSize);
	}
	else
	{
		result = BF_do_malloc_pages(ThreadCache::GetCache(), totalSize);
	}

	if (rawAllocData->mMaxStackTrace == 1)
	{
		memcpy((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr), stackTraceInfo, sizeof(intptr));
	}
	else if (rawAllocData->mMaxStackTrace > 1)
	{
		// BUGFIX: the count word is read back as an intptr (see RawMarkSpan /
		// RawReportHandleSpan), but 'stackTraceCount' is an int -- copying
		// sizeof(intptr) bytes straight from its address read past the variable
		// on 64-bit targets. Widen to intptr first so all 8 bytes are defined.
		intptr stackTraceCountVal = stackTraceCount;
		memcpy((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr), &stackTraceCountVal, sizeof(intptr));
		memcpy((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr) - stackTraceCount*sizeof(intptr), stackTraceInfo, stackTraceCount*sizeof(intptr));
	}

	// Writing the alloc-data pointer last publishes the slot as "live"
	memcpy((uint8*)result + totalSize - sizeof(intptr), &rawAllocData, sizeof(intptr));

	BfpSystem_InterlockedExchangeAdd32((uint32*)&gRawAllocSize, (uint32)totalSize);

	return result;
}
|
||||
|
||||
void BfRawFree(void* ptr)
|
||||
{
|
||||
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
|
||||
size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
|
||||
int allocSize = 0;
|
||||
if (cl == 0)
|
||||
{
|
||||
auto span = Static::pageheap()->GetDescriptor(p);
|
||||
if (span != NULL)
|
||||
{
|
||||
cl = span->sizeclass;
|
||||
if (cl == 0)
|
||||
{
|
||||
allocSize = span->length << kPageShift;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (cl != 0)
|
||||
allocSize = Static::sizemap()->class_to_size(cl);
|
||||
|
||||
if (allocSize == 0)
|
||||
{
|
||||
Beefy::String err = Beefy::StrFormat("Memory deallocation requested at invalid address %@", ptr);
|
||||
BF_FATAL(err);
|
||||
}
|
||||
|
||||
if (allocSize != 0)
|
||||
{
|
||||
BfpSystem_InterlockedExchangeAdd32((uint32*)&gRawAllocSize, (uint32)-allocSize);
|
||||
|
||||
// Clear out the dbg alloc data at the end
|
||||
void** dbgAllocDataAddr = (void**)((uint8*)ptr + allocSize - sizeof(void*));
|
||||
if (*dbgAllocDataAddr == 0)
|
||||
{
|
||||
Beefy::String err = Beefy::StrFormat("Memory deallocation requested at %@ but no allocation is recorded. Double delete?", ptr);
|
||||
BF_FATAL(err);
|
||||
}
|
||||
else if ((*dbgAllocDataAddr == &sObjectAllocData) && ((gBFGC.mMaxRawDeferredObjectFreePercentage != 0) || (!gDeferredFrees.IsEmpty())))
|
||||
{
|
||||
*dbgAllocDataAddr = NULL;
|
||||
|
||||
AutoCrit autoCrit(gBFGC.mCritSect);
|
||||
gMaxRawAllocSize = BF_MAX(gMaxRawAllocSize, gRawAllocSize);
|
||||
gDeferredObjectFreeSize += allocSize;
|
||||
|
||||
DeferredFreeEntry entry;
|
||||
entry.mObject = (bf::System::Object*)ptr;
|
||||
entry.mAllocSize = allocSize;
|
||||
gDeferredFrees.Add(entry);
|
||||
|
||||
int maxDeferredSize = gMaxRawAllocSize * gBFGC.mMaxRawDeferredObjectFreePercentage / 100;
|
||||
while (gDeferredObjectFreeSize > maxDeferredSize)
|
||||
{
|
||||
DeferredFreeEntry entry = gDeferredFrees.PopBack();
|
||||
gDeferredObjectFreeSize -= entry.mAllocSize;
|
||||
memset(entry.mObject, 0xDD, allocSize - sizeof(void*));
|
||||
tc_free(entry.mObject);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
*dbgAllocDataAddr = NULL;
|
||||
BF_FULL_MEMORY_FENCE();
|
||||
memset(ptr, 0xDD, allocSize - sizeof(void*));
|
||||
}
|
||||
|
||||
tc_free(ptr);
|
||||
}
|
||||
|
||||
#else // BF_GC_SUPPORTED
|
||||
|
||||
// GC-less build: raw allocation degrades to a plain heap allocation. The
// debug-metadata parameters are accepted for API compatibility but ignored.
void* BfRawAllocate(intptr size, bf::System::DbgRawAllocData* rawAllocData, void* stackTraceInfo, int stackTraceCount)
{
	void* mem = malloc(size);
	return mem;
}
|
||||
|
||||
// GC-less build: releases memory obtained from the malloc-backed BfRawAllocate.
void BfRawFree(void* ptr)
{
	::free(ptr);
}
|
||||
|
||||
#endif
|
Loading…
Add table
Add a link
Reference in a new issue