#define _WIN32_WINNT _WIN32_WINNT_WIN8

// This spits out interesting stats periodically to the console
#define BF_GC_PRINTSTATS

// Disable efficient new mass-freeing in TCMalloc
//#define BF_GC_USE_OLD_FREE

#ifdef BF_DEBUG
// Useful for tracing down memory corruption -- memory isn't returned to TCMalloc, it's just marked as freed. You'll run out eventually
//#define BF_NO_FREE_MEMORY
// Old and not too useful
//#define BG_GC_TRACKPTRS
#endif

#if defined BF_OBJECT_TRACK_ALLOCNUM && defined BF_NO_FREE_MEMORY
//#define BF_GC_VERIFY_SWEEP_IDS
#endif

//#define BF_SECTION_NURSERY

#ifdef BF_PLATFORM_WINDOWS
#include <direct.h>
#endif
# include "gc.h"
bf::System::DbgRawAllocData sEmptyAllocData = { 0 };
bf::System::DbgRawAllocData sObjectAllocData = { 0 };
#ifdef BF_GC_SUPPORTED

#include <fstream>
#include "BeefySysLib/Common.h"
#include "BeefySysLib/BFApp.h"
#include "BeefySysLib/util/CritSect.h"
#include "BeefySysLib/util/BeefPerf.h"
#include "BeefySysLib/util/HashSet.h"
#include "BeefySysLib/util/Dictionary.h"
#include "BeefySysLib/util/Deque.h"
#include <unordered_set>
#include "../rt/BfObjects.h"
#include "../rt/Thread.h"
#include <map>

#define TCMALLOC_NO_MALLOCGUARD
#define TCMALLOC_NAMESPACE tcmalloc_raw
#define TCMALLOC_EXTERN static
#include "gperftools/src/tcmalloc.cc"

using namespace tcmalloc_raw;
using namespace Beefy;
struct DeferredFreeEntry
{
	bf::System::Object* mObject;
	intptr mAllocSize;
};

// Raw object frees can be deferred (see BfRawFree) so recently freed memory
// isn't immediately recycled; these track the deferred queue and its budget.
static Beefy::Deque<DeferredFreeEntry> gDeferredFrees;
static intptr gRawAllocSize = 0;
static intptr gMaxRawAllocSize = 0;
static intptr gDeferredObjectFreeSize = 0;
void BFGC::RawInit()
{
	if (UNLIKELY(Static::pageheap() == NULL))
		ThreadCache::InitModule();
	ThreadCache::InitTSD();

	gGCDbgData.mRawRootPtr = Static::pageheap()->pagemap_.root_;
	gGCDbgData.mRawObjectSentinel = &sObjectAllocData;
}
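// Raw allocations carry trailing metadata (layout inferred from the writes in
// BfRawAllocate below):
//   [user data][0xBFBF guard][slack][uint16 markOffset][stack trace data][DbgRawAllocData*]
// The DbgRawAllocData* always occupies the last sizeof(intptr) bytes of the
// tcmalloc element, which is how RawMarkSpan and the report/free paths find it.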
void BFGC::RawMarkSpan(tcmalloc_raw::Span* span, int expectedStartPage)
{
	if ((gBfRtDbgFlags & BfRtFlags_ObjectHasDebugFlags) == 0)
		return;

	if (span->location != tcmalloc_raw::Span::IN_USE)
		return;

	if (span->start != expectedStartPage)
	{
		// This check covers when a new multi-page span is being put into place
		// and we catch after the first block, and it also catches the case
		// when the allocator splits a span and the pagemap can hold a reference
		// to a span that no longer covers that location.
		// For both of these cases we ignore the span. Remember, the worst case
		// here is that we'll miss a sweep of an object, which would just delay its
		// cleanup until the next GC cycle. Because the GC is the sole freer of spans,
		// there can never be a case where we find a valid span and then the span
		// changes sizeclass or location before we can scan the memory it points to.
		//
		// This also covers the case where a page spans over a radix map section and
		// we catch it on an outer loop again.
		return;
	}
	intptr pageSize = (intptr)1 << kPageShift;
	intptr spanSize = pageSize * span->length;
	void* spanStart = (void*)((intptr)span->start << kPageShift);
	void* spanEnd = (void*)((intptr)spanStart + spanSize);
	void* spanPtr = spanStart;
	BF_LOGASSERT((spanStart >= tcmalloc_raw::PageHeap::sAddressStart) && (spanEnd <= tcmalloc_raw::PageHeap::sAddressEnd));

	// A sizeclass of 0 denotes a large (multi-page) span holding a single element
	intptr elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
	if (elementSize == 0)
		elementSize = spanSize;
	BF_LOGASSERT(elementSize >= sizeof(bf::System::Object));
	while (spanPtr <= (uint8*)spanEnd - elementSize)
	{
		// The DbgRawAllocData* sits in the last sizeof(intptr) bytes of the element
		bf::System::DbgRawAllocData* rawAllocData = *(bf::System::DbgRawAllocData**)((uint8*)spanPtr + elementSize - sizeof(intptr));
		if (rawAllocData != NULL)
		{
			if (rawAllocData->mMarkFunc != NULL)
			{
				// Work out how much of the element's tail is metadata so we can
				// compute the usable data size below
				intptr extraDataSize = sizeof(intptr);
				if (rawAllocData->mMaxStackTrace == 1)
				{
					extraDataSize += sizeof(intptr) + 2;
					extraDataSize += *(uint16*)((uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr) - 2);
				}
				else if (rawAllocData->mMaxStackTrace > 1)
				{
					intptr stackTraceCount = *(intptr*)((uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr));
					extraDataSize += (1 + stackTraceCount) * sizeof(intptr) + 2;
					extraDataSize += *(uint16*)((uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr) - stackTraceCount * sizeof(intptr) - 2);
				}

				// Reinterpret the stored mark function as a member function pointer
				// so it can be invoked on each element in the span
				struct MarkTarget
				{
				};
				typedef void (MarkTarget::*MarkFunc)();
				MarkFunc markFunc = *(MarkFunc*)&rawAllocData->mMarkFunc;

				auto typeData = rawAllocData->mType->GetTypeData();
				if (typeData != NULL)
				{
					// It's possible we can overestimate elemCount, particularly for large allocations. This doesn't cause a problem
					// because we can safely mark on completely random memory -- pointer values are always validated before being followed
					intptr elemStride = BF_ALIGN(typeData->mSize, typeData->mAlign);
					if (elemStride > 0)
					{
						intptr dataSize = elementSize - extraDataSize;
						intptr elemCount = dataSize / elemStride;
						for (intptr elemIdx = 0; elemIdx < elemCount; elemIdx++)
						{
							(((MarkTarget*)((uint8*)spanPtr + elemIdx * elemStride))->*markFunc)();
						}
					}
				}
			}
		}

		spanPtr = (void*)((intptr)spanPtr + elementSize);
	}
}
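// RawMarkAll walks tcmalloc's pagemap radix tree directly: a two-level
// root/leaf map on 32-bit targets (BF32) and a three-level node tree on
// 64-bit. Every leaf slot is visited rather than skipping ahead by
// span->length, since stale pagemap entries can alias dead spans.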
void BFGC::RawMarkAll()
{
	//BP_ZONE("Sweep");

	mCurLiveObjectCount = 0;

#ifdef BF_GC_VERIFY_SWEEP_IDS
	maxAllocNum = bf::System::Object::sCurAllocNum;
	allocIdSet.clear();
#endif

	auto pageHeap = Static::pageheap();
	if (pageHeap == NULL)
		return;

	intptr leafCheckCount = 0;
#ifdef BF32
	for (int rootIdx = 0; rootIdx < PageHeap::PageMap::ROOT_LENGTH; rootIdx++)
	{
		PageHeap::PageMap::Leaf* rootLeaf = Static::pageheap()->pagemap_.root_[rootIdx];
		if (rootLeaf == NULL)
			continue;

		for (int leafIdx = 0; leafIdx < PageHeap::PageMap::LEAF_LENGTH; leafIdx++)
		{
			leafCheckCount++;

			tcmalloc_raw::Span* span = (tcmalloc_raw::Span*)rootLeaf->values[leafIdx];
			if (span != NULL)
			{
				int expectedStartPage = (rootIdx * PageHeap::PageMap::LEAF_LENGTH) + leafIdx;
				RawMarkSpan(span, expectedStartPage);
				// We may be tempted to advance by span->length here, BUT
				// we just scan all leaves because span data is
				// sometimes invalid and a long invalid span can cause
				// us to skip over an actual valid span
			}
		}
	}
#else
	for (int pageIdx1 = 0; pageIdx1 < PageHeap::PageMap::INTERIOR_LENGTH; pageIdx1++)
	{
		PageHeap::PageMap::Node* node1 = pageHeap->pagemap_.root_->ptrs[pageIdx1];
		if (node1 == NULL)
			continue;
		for (int pageIdx2 = 0; pageIdx2 < PageHeap::PageMap::INTERIOR_LENGTH; pageIdx2++)
		{
			PageHeap::PageMap::Node* node2 = node1->ptrs[pageIdx2];
			if (node2 == NULL)
				continue;
			for (int pageIdx3 = 0; pageIdx3 < PageHeap::PageMap::LEAF_LENGTH; pageIdx3++)
			{
				leafCheckCount++;
				tcmalloc_raw::Span* span = (tcmalloc_raw::Span*)node2->ptrs[pageIdx3];
				if (span != NULL)
				{
					int expectedStartPage = ((pageIdx1 * PageHeap::PageMap::INTERIOR_LENGTH) + pageIdx2) * PageHeap::PageMap::LEAF_LENGTH + pageIdx3;
					RawMarkSpan(span, expectedStartPage);
					// We may be tempted to advance by span->length here, BUT
					// we just scan all leaves because span data is
					// sometimes invalid and a long invalid span can cause
					// us to skip over an actual valid span
				}
			}
		}
	}
#endif
}
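// RawReportHandleSpan mirrors RawMarkSpan's walk, but instead of marking it
// either records leaks into mSweepInfo (sizeMap == NULL, used by RawShutdown)
// or accumulates per-type totals into sizeMap (used by RawReport).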
void BFGC::RawReportHandleSpan(tcmalloc_raw::Span* span, int expectedStartPage, int& objectCount, intptr& freeSize, Beefy::Dictionary<bf::System::Type*, AllocInfo>* sizeMap)
{
	if (span->location != tcmalloc_raw::Span::IN_USE)
		return;

	if (span->start != expectedStartPage)
	{
		// See RawMarkSpan for why mismatched spans are ignored
		return;
	}

	intptr pageSize = (intptr)1 << kPageShift;
	intptr spanSize = pageSize * span->length;
	void* spanStart = (void*)((intptr)span->start << kPageShift);
	void* spanEnd = (void*)((intptr)spanStart + spanSize);
	void* spanPtr = spanStart;
	BF_LOGASSERT((spanStart >= tcmalloc_raw::PageHeap::sAddressStart) && (spanEnd <= tcmalloc_raw::PageHeap::sAddressEnd));

	intptr elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
	if (elementSize == 0)
		elementSize = spanSize;
	while (spanPtr <= (uint8*)spanEnd - elementSize)
	{
		bf::System::DbgRawAllocData* rawAllocData = *(bf::System::DbgRawAllocData**)((uint8*)spanPtr + elementSize - sizeof(intptr));
		if (rawAllocData != NULL)
		{
			bf::System::Type* type = rawAllocData->mType;
			AllocInfo* sizePtr = NULL;

			if (sizeMap == NULL)
			{
				intptr extraDataSize = sizeof(intptr);

				RawLeakInfo rawLeakInfo;
				rawLeakInfo.mRawAllocData = rawAllocData;
				rawLeakInfo.mDataPtr = spanPtr;
				rawLeakInfo.mDataCount = 1; // Default, in case no usable type size is found below

				if (rawAllocData->mMaxStackTrace == 1)
				{
					extraDataSize += sizeof(intptr);
					rawLeakInfo.mStackTraceCount = 1;
					rawLeakInfo.mStackTracePtr = (uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr);
				}
				else if (rawAllocData->mMaxStackTrace > 1)
				{
					rawLeakInfo.mStackTraceCount = *(intptr*)((uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr));
					rawLeakInfo.mStackTracePtr = (uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr) - rawLeakInfo.mStackTraceCount * sizeof(intptr);
					extraDataSize += (1 + rawLeakInfo.mStackTraceCount) * sizeof(intptr);
				}
				else
				{
					rawLeakInfo.mStackTraceCount = 0;
					rawLeakInfo.mStackTracePtr = NULL;
				}

				if (rawAllocData->mType != NULL)
				{
					bf::System::Type_NOFLAGS* typeData = rawAllocData->mType->GetTypeData();
					if (typeData != NULL)
					{
						intptr typeSize = typeData->mSize;
						if (typeSize > 0)
							rawLeakInfo.mDataCount = (elementSize - extraDataSize) / typeSize;
					}
				}

				mSweepInfo.mRawLeaks.Add(rawLeakInfo);
				mSweepInfo.mLeakCount++;
			}
			else
			{
				(*sizeMap).TryAdd(type, NULL, &sizePtr);
				sizePtr->mRawSize += elementSize;
				sizePtr->mRawCount++;
			}

			objectCount++;
		}
		else
		{
			freeSize += elementSize;
		}

		spanPtr = (void*)((intptr)spanPtr + elementSize);
	}
}
void BFGC::RawReportScan(int& objectCount, intptr& freeSize, Beefy::Dictionary<bf::System::Type*, AllocInfo>* sizeMap)
{
	auto pageHeap = Static::pageheap();
	if (pageHeap == NULL)
		return;

#ifdef BF32
	for (int rootIdx = 0; rootIdx < PageHeap::PageMap::ROOT_LENGTH; rootIdx++)
	{
		PageHeap::PageMap::Leaf* rootLeaf = Static::pageheap()->pagemap_.root_[rootIdx];
		if (rootLeaf == NULL)
			continue;

		for (int leafIdx = 0; leafIdx < PageHeap::PageMap::LEAF_LENGTH; leafIdx++)
		{
			tcmalloc_raw::Span* span = (tcmalloc_raw::Span*)rootLeaf->values[leafIdx];
			if (span != NULL)
			{
				int expectedStartPage = (rootIdx * PageHeap::PageMap::LEAF_LENGTH) + leafIdx;
				RawReportHandleSpan(span, expectedStartPage, objectCount, freeSize, sizeMap);
				// We may be tempted to advance by span->length here, BUT
				// we just scan all leaves because span data is
				// sometimes invalid and a long invalid span can cause
				// us to skip over an actual valid span
			}
		}
	}
#else
	for (int pageIdx1 = 0; pageIdx1 < PageHeap::PageMap::INTERIOR_LENGTH; pageIdx1++)
	{
		PageHeap::PageMap::Node* node1 = Static::pageheap()->pagemap_.root_->ptrs[pageIdx1];
		if (node1 == NULL)
			continue;
		for (int pageIdx2 = 0; pageIdx2 < PageHeap::PageMap::INTERIOR_LENGTH; pageIdx2++)
		{
			PageHeap::PageMap::Node* node2 = node1->ptrs[pageIdx2];
			if (node2 == NULL)
				continue;
			for (int pageIdx3 = 0; pageIdx3 < PageHeap::PageMap::LEAF_LENGTH; pageIdx3++)
			{
				tcmalloc_raw::Span* span = (tcmalloc_raw::Span*)node2->ptrs[pageIdx3];
				if (span != NULL)
				{
					int expectedStartPage = ((pageIdx1 * PageHeap::PageMap::INTERIOR_LENGTH) + pageIdx2) * PageHeap::PageMap::LEAF_LENGTH + pageIdx3;
					RawReportHandleSpan(span, expectedStartPage, objectCount, freeSize, sizeMap);
					// We may be tempted to advance by span->length here, BUT
					// we just scan all leaves because span data is
					// sometimes invalid and a long invalid span can cause
					// us to skip over an actual valid span
				}
			}
		}
	}
#endif
}
void BFGC::RawReport(String& msg, intptr& freeSize, std::multimap<AllocInfo, bf::System::Type*>& orderedSizeMap)
{
	BP_ZONE("RawReport");

	int objectCount = 0;

#ifdef BF_GC_VERIFY_SWEEP_IDS
	maxAllocNum = bf::System::Object::sCurAllocNum;
	allocIdSet.clear();
#endif

	intptr leafCheckCount = 0;
	bool overflowed = false;

	Beefy::Dictionary<bf::System::Type*, AllocInfo> sizeMap;
	RawReportScan(objectCount, freeSize, &sizeMap);
	for (auto& pair : sizeMap)
	{
		orderedSizeMap.insert(std::make_pair(pair.mValue, pair.mKey));
	}

	msg += Beefy::StrFormat("Live Non-Objects %d\n", objectCount);
}
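// RawShutdown drains any deferred object frees, then rescans the heap; any
// allocation still carrying a DbgRawAllocData record at this point is a leak.
// The "\x1"-prefixed lines built below appear to be a structured format
// consumed by the IDE's Output panel (note the "Mouse over an 'i' icon"
// message in the error string).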
extern Beefy::StringT<0> gDbgErrorString;

void BFGC::RawShutdown()
{
	BF_ASSERT(!mRunning);

	while (!gDeferredFrees.IsEmpty())
	{
		DeferredFreeEntry entry = gDeferredFrees.PopBack();
		gDeferredObjectFreeSize -= entry.mAllocSize;
		tc_free(entry.mObject);
	}
	BF_ASSERT(gDeferredObjectFreeSize == 0);
	int objectCount = 0;
	intptr freeSize = 0;

	mSweepInfo.mLeakCount = 0;
	RawReportScan(objectCount, freeSize, NULL);

	if (mSweepInfo.mLeakCount > 0)
	{
		Beefy::String errorStr = StrFormat("%d raw memory leak%s detected.\nMouse over an 'i' icon in the Output panel to view the leaked memory or the associated allocation stack trace.",
			mSweepInfo.mLeakCount, (mSweepInfo.mLeakCount != 1) ? "s" : "");
		gDbgErrorString = errorStr;
		gDbgErrorString += "\n";

		int passLeakCount = 0;
		for (auto& rawLeak : mSweepInfo.mRawLeaks)
		{
			if (passLeakCount == 20000) // Only display so many...
				break;

			Beefy::String typeName;
			if (rawLeak.mRawAllocData->mType != NULL)
				typeName = rawLeak.mRawAllocData->mType->GetFullName() + "*";
			else if (rawLeak.mRawAllocData == &sObjectAllocData)
				typeName = "System.Object";
			else
				typeName = "uint8*";

			errorStr += "\x1";
			String leakStr = StrFormat("(%s)0x%@", typeName.c_str(), rawLeak.mDataPtr);
			if (rawLeak.mDataCount > 1)
				leakStr += StrFormat(",%d", rawLeak.mDataCount);
			errorStr += StrFormat("LEAK\t%s\n", leakStr.c_str());
			errorStr += StrFormat("%s\n", leakStr.c_str());

			if (rawLeak.mStackTraceCount > 0)
			{
				errorStr += "\x1";
				errorStr += StrFormat("LEAK\t(System.CallStackAddr*)0x%@", rawLeak.mStackTracePtr);
				if (rawLeak.mStackTraceCount == 1)
					errorStr += StrFormat(", nm");
				else
					errorStr += StrFormat(", %d, na", rawLeak.mStackTraceCount);
				errorStr += StrFormat("\n[AllocStackTrace]\n");
			}

			if (gDbgErrorString.length() < 256)
				gDbgErrorString += StrFormat("%s\n", leakStr.c_str());

			passLeakCount++;
		}
		BF_ASSERT(mSweepInfo.mLeakCount > 0);

		gBfRtDbgCallbacks.SetErrorString(gDbgErrorString.c_str());
		gBfRtDbgCallbacks.DebugMessageData_SetupError(errorStr.c_str(), 0);
		BF_DEBUG_BREAK();
	}
	else
	{
		BF_ASSERT(gRawAllocSize == 0);
	}
}
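// BF_do_malloc_pages and BF_do_malloc_small appear to be lightly adapted
// copies of the corresponding do_malloc paths in the embedded tcmalloc.cc
// (included above with TCMALLOC_NAMESPACE=tcmalloc_raw). The key difference
// is that `size` is taken by reference and the rounded-up allocation size is
// written back, so BfRawAllocate knows exactly where the trailing metadata lands.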
inline void* BF_do_malloc_pages(ThreadCache* heap, size_t& size)
{
	void* result;
	bool report_large;

	heap->requested_bytes_ += size;
	Length num_pages = TCMALLOC_NAMESPACE::pages(size);
	size = num_pages << kPageShift;

	if ((TCMALLOC_NAMESPACE::FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
		result = DoSampledAllocation(size);
		SpinLockHolder h(Static::pageheap_lock());
		report_large = should_report_large(num_pages);
	}
	else {
		SpinLockHolder h(Static::pageheap_lock());
		Span* span = Static::pageheap()->New(num_pages);
		result = (UNLIKELY(span == NULL) ? NULL : SpanToMallocResult(span));
		report_large = should_report_large(num_pages);
	}

	if (report_large) {
		ReportLargeAlloc(num_pages, result);
	}
	return result;
}
inline void* BF_do_malloc_small(ThreadCache* heap, size_t& size)
{
	if (size == 0)
		size = (int)sizeof(void*);

	ASSERT(Static::IsInited());
	ASSERT(heap != NULL);
	heap->requested_bytes_ += size;
	size_t cl = Static::sizemap()->SizeClass(size);
	size = Static::sizemap()->class_to_size(cl);

	void* result;
	if ((TCMALLOC_NAMESPACE::FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
		result = DoSampledAllocation(size);
	}
	else {
		// The common case, and also the simplest. This just pops the
		// size-appropriate freelist, after replenishing it if it's empty.
		result = CheckedMallocResult(heap->Allocate(size, cl));
	}
	return result;
}
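// Trailing metadata written by BfRawAllocate (sketch inferred from the writes
// below; offsets relative to the start of the tcmalloc element):
//   [0 .. size)            user data
//   [size, size+2)         0xBFBF guard word
//   ...                    rounding slack
//   uint16 markOffset      distance back to the guard word
//   stack trace addrs      plus a count word when mMaxStackTrace > 1
//   DbgRawAllocData*       last sizeof(intptr) bytes of the element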
void* BfRawAllocate(intptr size, bf::System::DbgRawAllocData* rawAllocData, void* stackTraceInfo, int stackTraceCount)
{
	size_t totalSize = size;
	totalSize += sizeof(intptr) + 4; // int16 protectBytes, <unused bytes>, int16 sizeOffset, <stack trace data>, DbgRawAllocData ptr
	if (rawAllocData->mMaxStackTrace == 1)
		totalSize += sizeof(intptr);
	else if (rawAllocData->mMaxStackTrace > 1)
		totalSize += (1 + stackTraceCount) * sizeof(intptr);

	void* result;
	if (ThreadCache::have_tls &&
		LIKELY(totalSize < ThreadCache::MinSizeForSlowPath()))
	{
		result = BF_do_malloc_small(ThreadCache::GetCacheWhichMustBePresent(), totalSize);
	}
	else if (totalSize <= kMaxSize)
	{
		result = BF_do_malloc_small(ThreadCache::GetCache(), totalSize);
	}
	else
	{
		result = BF_do_malloc_pages(ThreadCache::GetCache(), totalSize);
	}

	// Guard word directly after the user data; BfRawFree checks this to detect overwrites
	*(uint16*)((uint8*)result + size) = 0xBFBF;

	uint16* markOffsetPtr = NULL;
	if (rawAllocData->mMaxStackTrace == 1)
	{
		memcpy((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr), stackTraceInfo, sizeof(intptr));
		markOffsetPtr = (uint16*)((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr) - 2);
	}
	else if (rawAllocData->mMaxStackTrace > 1)
	{
		*(intptr*)((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr)) = stackTraceCount;
		memcpy((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr) - stackTraceCount * sizeof(intptr), stackTraceInfo, stackTraceCount * sizeof(intptr));
		markOffsetPtr = (uint16*)((uint8*)result + totalSize - sizeof(intptr) - sizeof(intptr) - stackTraceCount * sizeof(intptr) - 2);
	}
	else
	{
		markOffsetPtr = (uint16*)((uint8*)result + totalSize - sizeof(intptr) - 2);
	}

	// Distance from the guard word up to this offset field
	*markOffsetPtr = (uint16)(((uint8*)markOffsetPtr) - ((uint8*)result + size));

	memcpy((uint8*)result + totalSize - sizeof(intptr), &rawAllocData, sizeof(intptr));

	BfpSystem_InterlockedExchangeAdd32((uint32*)&gRawAllocSize, (uint32)totalSize);
	return result;
}
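// BfRawFree validates the trailing metadata before releasing memory: it
// recovers the allocation size from tcmalloc's size class (or span length),
// checks the DbgRawAllocData slot for double frees, and walks markOffset back
// to the 0xBFBF guard to detect writes past the end of the user data. Freed
// bytes are filled with 0xDD so use-after-free stands out in the debugger.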
void BfRawFree(void* ptr)
{
	const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
	size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
	intptr allocSize = 0;
	if (cl == 0)
	{
		auto span = Static::pageheap()->GetDescriptor(p);
		if (span != NULL)
		{
			cl = span->sizeclass;
			if (cl == 0)
			{
				allocSize = span->length << kPageShift;
			}
		}
	}
	if (cl != 0)
		allocSize = Static::sizemap()->class_to_size(cl);
	if (allocSize == 0)
	{
		Beefy::String err = Beefy::StrFormat("Memory deallocation requested at invalid address 0x%@", ptr);
		BF_FATAL(err);
	}

	if (allocSize != 0)
	{
		BfpSystem_InterlockedExchangeAdd32((uint32*)&gRawAllocSize, (uint32)-allocSize);

		// Clear out the dbg alloc data at the end
		void** dbgAllocDataAddr = (void**)((uint8*)ptr + allocSize - sizeof(void*));
		if (*dbgAllocDataAddr == 0)
		{
			Beefy::String err = Beefy::StrFormat("Memory deallocation requested at 0x%@ but no allocation is recorded. Double delete?", ptr);
			BF_FATAL(err);
			return;
		}

		auto rawAllocData = (bf::System::DbgRawAllocData*)*dbgAllocDataAddr;
		uint16* markOffsetPtr;
		if (rawAllocData->mMaxStackTrace == 1)
		{
			markOffsetPtr = (uint16*)((uint8*)ptr + allocSize - sizeof(intptr) - sizeof(intptr) - 2);
		}
		else if (rawAllocData->mMaxStackTrace > 1)
		{
			// Read as intptr to match the width written in BfRawAllocate
			intptr stackTraceCount = *(intptr*)((uint8*)ptr + allocSize - sizeof(intptr) - sizeof(intptr));
			markOffsetPtr = (uint16*)((uint8*)ptr + allocSize - sizeof(intptr) - sizeof(intptr) - stackTraceCount * sizeof(intptr) - 2);
		}
		else
		{
			markOffsetPtr = (uint16*)((uint8*)ptr + allocSize - sizeof(intptr) - 2);
		}

		int markOffset = *markOffsetPtr;
		if ((markOffset < 2) || (markOffset >= allocSize) || (markOffset > kPageSize + 2) ||
			(*(uint16*)((uint8*)markOffsetPtr - markOffset) != 0xBFBF))
		{
			int requestedSize = (uint8*)markOffsetPtr - (uint8*)ptr - markOffset;
			Beefy::String err = Beefy::StrFormat("Memory deallocation detected write-past-end error in %d-byte raw allocation at 0x%@", requestedSize, ptr);
			BF_FATAL(err);
			return;
		}
		if ((*dbgAllocDataAddr == &sObjectAllocData) && ((gBFGC.mMaxRawDeferredObjectFreePercentage != 0) || (!gDeferredFrees.IsEmpty())))
		{
			*dbgAllocDataAddr = NULL;

			AutoCrit autoCrit(gBFGC.mCritSect);
			gMaxRawAllocSize = BF_MAX(gMaxRawAllocSize, gRawAllocSize);
			gDeferredObjectFreeSize += allocSize;

			DeferredFreeEntry entry;
			entry.mObject = (bf::System::Object*)ptr;
			entry.mAllocSize = allocSize;
			gDeferredFrees.Add(entry);

			intptr maxDeferredSize = gMaxRawAllocSize * gBFGC.mMaxRawDeferredObjectFreePercentage / 100;
			while (gDeferredObjectFreeSize > maxDeferredSize)
			{
				DeferredFreeEntry entry = gDeferredFrees.PopBack();
				gDeferredObjectFreeSize -= entry.mAllocSize;
				// Fill using the entry's own size (not this call's allocSize)
				memset(entry.mObject, 0xDD, entry.mAllocSize - sizeof(void*));
				tc_free(entry.mObject);
			}
			return;
		}

		*dbgAllocDataAddr = NULL;
		BF_FULL_MEMORY_FENCE();
		memset(ptr, 0xDD, allocSize - sizeof(void*));
	}

	tc_free(ptr);
}
#else // BF_GC_SUPPORTED

void* BfRawAllocate(intptr size, bf::System::DbgRawAllocData* rawAllocData, void* stackTraceInfo, int stackTraceCount)
{
	return malloc(size);
}

void BfRawFree(void* ptr)
{
	free(ptr);
}

#endif