mirror of https://github.com/beefytech/Beef.git synced 2025-07-04 15:26:00 +02:00

Added TCMalloc and JEMalloc projects

Author: Brian Fiete, 2022-06-02 17:55:17 -07:00
Commit: 652142e189
Parent: 53376f3861
242 changed files with 67746 additions and 6 deletions

BeefRT/JEMalloc/src/arena.c (new file, 1891 lines)
File diff suppressed because it is too large.

BeefRT/JEMalloc/src/background_thread.c (new file, 820 lines)
@@ -0,0 +1,820 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/******************************************************************************/
/* Data. */
/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1;
/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state. Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
size_t max_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;
/******************************************************************************/
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
static void
pthread_create_wrapper_init(void) {
#ifdef JEMALLOC_LAZY_LOCK
if (!isthreaded) {
isthreaded = true;
}
#endif
}
int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
void *(*start_routine)(void *), void *__restrict arg) {
pthread_create_wrapper_init();
return pthread_create_fptr(thread, attr, start_routine, arg);
}
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED
void background_thread_wakeup_early(background_thread_info_t *info,
nstime_t *remaining_sleep) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
#else
static bool background_thread_enabled_at_fork;
static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
background_thread_wakeup_time_set(tsdn, info, 0);
info->npages_to_purge_new = 0;
if (config_stats) {
info->tot_n_runs = 0;
nstime_init_zero(&info->tot_sleep_time);
}
}
static inline bool
set_current_thread_affinity(int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
cpu_set_t cpuset;
#else
# ifndef __NetBSD__
cpuset_t cpuset;
# else
cpuset_t *cpuset;
# endif
#endif
#ifndef __NetBSD__
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
#else
cpuset = cpuset_create();
#endif
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0);
#else
# ifndef __NetBSD__
int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t),
&cpuset);
# else
int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset),
cpuset);
cpuset_destroy(cpuset);
# endif
return ret != 0;
#endif
}
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
uint64_t interval) {
if (config_stats) {
info->tot_n_runs++;
}
info->npages_to_purge_new = 0;
struct timeval tv;
/* Specific clock required by timedwait. */
gettimeofday(&tv, NULL);
nstime_t before_sleep;
nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);
int ret;
if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
background_thread_wakeup_time_set(tsdn, info,
BACKGROUND_THREAD_INDEFINITE_SLEEP);
ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
assert(ret == 0);
} else {
assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
/* We need malloc clock (can be different from tv). */
nstime_t next_wakeup;
nstime_init_update(&next_wakeup);
nstime_iadd(&next_wakeup, interval);
assert(nstime_ns(&next_wakeup) <
BACKGROUND_THREAD_INDEFINITE_SLEEP);
background_thread_wakeup_time_set(tsdn, info,
nstime_ns(&next_wakeup));
nstime_t ts_wakeup;
nstime_copy(&ts_wakeup, &before_sleep);
nstime_iadd(&ts_wakeup, interval);
struct timespec ts;
ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);
assert(!background_thread_indefinite_sleep(info));
ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
assert(ret == ETIMEDOUT || ret == 0);
}
if (config_stats) {
gettimeofday(&tv, NULL);
nstime_t after_sleep;
nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
if (nstime_compare(&after_sleep, &before_sleep) > 0) {
nstime_subtract(&after_sleep, &before_sleep);
nstime_add(&info->tot_sleep_time, &after_sleep);
}
}
}
static bool
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
if (unlikely(info->state == background_thread_paused)) {
malloc_mutex_unlock(tsdn, &info->mtx);
/* Wait on global lock to update status. */
malloc_mutex_lock(tsdn, &background_thread_lock);
malloc_mutex_unlock(tsdn, &background_thread_lock);
malloc_mutex_lock(tsdn, &info->mtx);
return true;
}
return false;
}
static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
unsigned ind) {
uint64_t ns_until_deferred = BACKGROUND_THREAD_DEFERRED_MAX;
unsigned narenas = narenas_total_get();
bool slept_indefinitely = background_thread_indefinite_sleep(info);
for (unsigned i = ind; i < narenas; i += max_background_threads) {
arena_t *arena = arena_get(tsdn, i, false);
if (!arena) {
continue;
}
/*
* If thread was woken up from the indefinite sleep, don't
* do the work instantly, but rather check when the deferred
* work that caused this thread to wake up is scheduled for.
*/
if (!slept_indefinitely) {
arena_do_deferred_work(tsdn, arena);
}
if (ns_until_deferred <= BACKGROUND_THREAD_MIN_INTERVAL_NS) {
/* Min interval will be used. */
continue;
}
uint64_t ns_arena_deferred = pa_shard_time_until_deferred_work(
tsdn, &arena->pa_shard);
if (ns_arena_deferred < ns_until_deferred) {
ns_until_deferred = ns_arena_deferred;
}
}
uint64_t sleep_ns;
if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) {
sleep_ns = BACKGROUND_THREAD_INDEFINITE_SLEEP;
} else {
sleep_ns =
(ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS)
? BACKGROUND_THREAD_MIN_INTERVAL_NS
: ns_until_deferred;
}
background_thread_sleep(tsdn, info, sleep_ns);
}
static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
if (info == &background_thread_info[0]) {
malloc_mutex_assert_owner(tsd_tsdn(tsd),
&background_thread_lock);
} else {
malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
&background_thread_lock);
}
pre_reentrancy(tsd, NULL);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
bool has_thread;
assert(info->state != background_thread_paused);
if (info->state == background_thread_started) {
has_thread = true;
info->state = background_thread_stopped;
pthread_cond_signal(&info->cond);
} else {
has_thread = false;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
if (!has_thread) {
post_reentrancy(tsd);
return false;
}
void *ret;
if (pthread_join(info->thread, &ret)) {
post_reentrancy(tsd);
return true;
}
assert(ret == NULL);
n_background_threads--;
post_reentrancy(tsd);
return false;
}
static void *background_thread_entry(void *ind_arg);
static int
background_thread_create_signals_masked(pthread_t *thread,
const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
/*
* Mask signals during thread creation so that the thread inherits
* an empty signal set.
*/
sigset_t set;
sigfillset(&set);
sigset_t oldset;
int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
if (mask_err != 0) {
return mask_err;
}
int create_err = pthread_create_wrapper(thread, attr, start_routine,
arg);
/*
* Restore the signal mask. Failure to restore the signal mask here
* changes program behavior.
*/
int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
if (restore_err != 0) {
malloc_printf("<jemalloc>: background thread creation "
"failed (%d), and signal mask restoration failed "
"(%d)\n", create_err, restore_err);
if (opt_abort) {
abort();
}
}
return create_err;
}
static bool
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
bool *created_threads) {
bool ret = false;
if (likely(*n_created == n_background_threads)) {
return ret;
}
tsdn_t *tsdn = tsd_tsdn(tsd);
malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
for (unsigned i = 1; i < max_background_threads; i++) {
if (created_threads[i]) {
continue;
}
background_thread_info_t *info = &background_thread_info[i];
malloc_mutex_lock(tsdn, &info->mtx);
/*
* In case of the background_thread_paused state because of
* arena reset, delay the creation.
*/
bool create = (info->state == background_thread_started);
malloc_mutex_unlock(tsdn, &info->mtx);
if (!create) {
continue;
}
pre_reentrancy(tsd, NULL);
int err = background_thread_create_signals_masked(&info->thread,
NULL, background_thread_entry, (void *)(uintptr_t)i);
post_reentrancy(tsd);
if (err == 0) {
(*n_created)++;
created_threads[i] = true;
} else {
malloc_printf("<jemalloc>: background thread "
"creation failed (%d)\n", err);
if (opt_abort) {
abort();
}
}
/* Return to restart the loop since we unlocked. */
ret = true;
break;
}
malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);
return ret;
}
static void
background_thread0_work(tsd_t *tsd) {
/* Thread0 is also responsible for launching / terminating threads. */
VARIABLE_ARRAY(bool, created_threads, max_background_threads);
unsigned i;
for (i = 1; i < max_background_threads; i++) {
created_threads[i] = false;
}
/* Start working, and create more threads when asked. */
unsigned n_created = 1;
while (background_thread_info[0].state != background_thread_stopped) {
if (background_thread_pause_check(tsd_tsdn(tsd),
&background_thread_info[0])) {
continue;
}
if (check_background_thread_creation(tsd, &n_created,
(bool *)&created_threads)) {
continue;
}
background_work_sleep_once(tsd_tsdn(tsd),
&background_thread_info[0], 0);
}
/*
* Shut down other threads at exit. Note that the ctl thread is holding
* the global background_thread mutex (and is waiting) for us.
*/
assert(!background_thread_enabled());
for (i = 1; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
assert(info->state != background_thread_paused);
if (created_threads[i]) {
background_threads_disable_single(tsd, info);
} else {
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
if (info->state != background_thread_stopped) {
/* The thread was not created. */
assert(info->state ==
background_thread_started);
n_background_threads--;
info->state = background_thread_stopped;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
}
background_thread_info[0].state = background_thread_stopped;
assert(n_background_threads == 1);
}
static void
background_work(tsd_t *tsd, unsigned ind) {
background_thread_info_t *info = &background_thread_info[ind];
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
BACKGROUND_THREAD_INDEFINITE_SLEEP);
if (ind == 0) {
background_thread0_work(tsd);
} else {
while (info->state != background_thread_stopped) {
if (background_thread_pause_check(tsd_tsdn(tsd),
info)) {
continue;
}
background_work_sleep_once(tsd_tsdn(tsd), info, ind);
}
}
assert(info->state == background_thread_stopped);
background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
static void *
background_thread_entry(void *ind_arg) {
unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#elif defined(__FreeBSD__) || defined(__DragonFly__)
pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
#endif
if (opt_percpu_arena != percpu_arena_disabled) {
set_current_thread_affinity((int)thread_ind);
}
/*
* Start periodic background work. We use internal tsd which avoids
* side effects, for example triggering new arena creation (which in
* turn triggers another background thread creation).
*/
background_work(tsd_internal_fetch(), thread_ind);
assert(pthread_equal(pthread_self(),
background_thread_info[thread_ind].thread));
return NULL;
}
static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
info->state = background_thread_started;
background_thread_info_init(tsd_tsdn(tsd), info);
n_background_threads++;
}
static bool
background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
assert(have_background_thread);
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
/* We create at most NCPUs threads. */
size_t thread_ind = arena_ind % max_background_threads;
background_thread_info_t *info = &background_thread_info[thread_ind];
bool need_new_thread;
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
need_new_thread = background_thread_enabled() &&
(info->state == background_thread_stopped);
if (need_new_thread) {
background_thread_init(tsd, info);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
if (!need_new_thread) {
return false;
}
if (arena_ind != 0) {
/* Threads are created asynchronously by Thread 0. */
background_thread_info_t *t0 = &background_thread_info[0];
malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
assert(t0->state == background_thread_started);
pthread_cond_signal(&t0->cond);
malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);
return false;
}
pre_reentrancy(tsd, NULL);
/*
* To avoid complications (besides reentrancy), create internal
* background threads with the underlying pthread_create.
*/
int err = background_thread_create_signals_masked(&info->thread, NULL,
background_thread_entry, (void *)thread_ind);
post_reentrancy(tsd);
if (err != 0) {
malloc_printf("<jemalloc>: arena 0 background thread creation "
"failed (%d)\n", err);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
info->state = background_thread_stopped;
n_background_threads--;
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
return true;
}
return false;
}
/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
assert(have_background_thread);
bool ret;
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
ret = background_thread_create_locked(tsd, arena_ind);
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
return ret;
}
bool
background_threads_enable(tsd_t *tsd) {
assert(n_background_threads == 0);
assert(background_thread_enabled());
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
VARIABLE_ARRAY(bool, marked, max_background_threads);
unsigned nmarked;
for (unsigned i = 0; i < max_background_threads; i++) {
marked[i] = false;
}
nmarked = 0;
/* Thread 0 is required and created at the end. */
marked[0] = true;
/* Mark the threads we need to create for thread 0. */
unsigned narenas = narenas_total_get();
for (unsigned i = 1; i < narenas; i++) {
if (marked[i % max_background_threads] ||
arena_get(tsd_tsdn(tsd), i, false) == NULL) {
continue;
}
background_thread_info_t *info = &background_thread_info[
i % max_background_threads];
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
assert(info->state == background_thread_stopped);
background_thread_init(tsd, info);
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
marked[i % max_background_threads] = true;
if (++nmarked == max_background_threads) {
break;
}
}
bool err = background_thread_create_locked(tsd, 0);
if (err) {
return true;
}
for (unsigned i = 0; i < narenas; i++) {
arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
if (arena != NULL) {
pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
&arena->pa_shard, true);
}
}
return false;
}
bool
background_threads_disable(tsd_t *tsd) {
assert(!background_thread_enabled());
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
/* Thread 0 will be responsible for terminating other threads. */
if (background_threads_disable_single(tsd,
&background_thread_info[0])) {
return true;
}
assert(n_background_threads == 0);
unsigned narenas = narenas_total_get();
for (unsigned i = 0; i < narenas; i++) {
arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
if (arena != NULL) {
pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
&arena->pa_shard, false);
}
}
return false;
}
bool
background_thread_is_started(background_thread_info_t *info) {
return info->state == background_thread_started;
}
void
background_thread_wakeup_early(background_thread_info_t *info,
nstime_t *remaining_sleep) {
/*
* This is an optimization to increase batching. At this point
* we know that background thread wakes up soon, so the time to cache
* the just freed memory is bounded and low.
*/
if (remaining_sleep != NULL && nstime_ns(remaining_sleep) <
BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return;
}
pthread_cond_signal(&info->cond);
}
void
background_thread_prefork0(tsdn_t *tsdn) {
malloc_mutex_prefork(tsdn, &background_thread_lock);
background_thread_enabled_at_fork = background_thread_enabled();
}
void
background_thread_prefork1(tsdn_t *tsdn) {
for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
}
}
void
background_thread_postfork_parent(tsdn_t *tsdn) {
for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_postfork_parent(tsdn,
&background_thread_info[i].mtx);
}
malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}
void
background_thread_postfork_child(tsdn_t *tsdn) {
for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_postfork_child(tsdn,
&background_thread_info[i].mtx);
}
malloc_mutex_postfork_child(tsdn, &background_thread_lock);
if (!background_thread_enabled_at_fork) {
return;
}
/* Clear background_thread state (reset to disabled for child). */
malloc_mutex_lock(tsdn, &background_thread_lock);
n_background_threads = 0;
background_thread_enabled_set(tsdn, false);
for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
malloc_mutex_lock(tsdn, &info->mtx);
info->state = background_thread_stopped;
int ret = pthread_cond_init(&info->cond, NULL);
assert(ret == 0);
background_thread_info_init(tsdn, info);
malloc_mutex_unlock(tsdn, &info->mtx);
}
malloc_mutex_unlock(tsdn, &background_thread_lock);
}
bool
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
assert(config_stats);
malloc_mutex_lock(tsdn, &background_thread_lock);
if (!background_thread_enabled()) {
malloc_mutex_unlock(tsdn, &background_thread_lock);
return true;
}
nstime_init_zero(&stats->run_interval);
memset(&stats->max_counter_per_bg_thd, 0, sizeof(mutex_prof_data_t));
uint64_t num_runs = 0;
stats->num_threads = n_background_threads;
for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
/*
* Each background thread run may take a long time;
* avoid waiting on the stats if the thread is active.
*/
continue;
}
if (info->state != background_thread_stopped) {
num_runs += info->tot_n_runs;
nstime_add(&stats->run_interval, &info->tot_sleep_time);
malloc_mutex_prof_max_update(tsdn,
&stats->max_counter_per_bg_thd, &info->mtx);
}
malloc_mutex_unlock(tsdn, &info->mtx);
}
stats->num_runs = num_runs;
if (num_runs > 0) {
nstime_idivide(&stats->run_interval, num_runs);
}
malloc_mutex_unlock(tsdn, &background_thread_lock);
return false;
}
#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
#ifdef JEMALLOC_HAVE_DLSYM
#include <dlfcn.h>
#endif
static bool
pthread_create_fptr_init(void) {
if (pthread_create_fptr != NULL) {
return false;
}
/*
* Try the next symbol first, because 1) when use lazy_lock we have a
* wrapper for pthread_create; and 2) application may define its own
* wrapper as well (and can call malloc within the wrapper).
*/
#ifdef JEMALLOC_HAVE_DLSYM
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
#else
pthread_create_fptr = NULL;
#endif
if (pthread_create_fptr == NULL) {
if (config_lazy_lock) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n");
abort();
} else {
/* Fall back to the default symbol. */
pthread_create_fptr = pthread_create;
}
}
return false;
}
/*
* When lazy lock is enabled, we need to make sure setting isthreaded before
* taking any background_thread locks. This is called early in ctl (instead of
* wait for the pthread_create calls to trigger) because the mutex is required
* before creating background threads.
*/
void
background_thread_ctl_init(tsdn_t *tsdn) {
malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
pthread_create_fptr_init();
pthread_create_wrapper_init();
#endif
}
#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
bool
background_thread_boot0(void) {
if (!have_background_thread && opt_background_thread) {
malloc_printf("<jemalloc>: option background_thread currently "
"supports pthread only\n");
return true;
}
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
if ((config_lazy_lock || opt_background_thread) &&
pthread_create_fptr_init()) {
return true;
}
#endif
return false;
}
bool
background_thread_boot1(tsdn_t *tsdn, base_t *base) {
#ifdef JEMALLOC_BACKGROUND_THREAD
assert(have_background_thread);
assert(narenas_total_get() > 0);
if (opt_max_background_threads > MAX_BACKGROUND_THREAD_LIMIT) {
opt_max_background_threads = DEFAULT_NUM_BACKGROUND_THREAD;
}
max_background_threads = opt_max_background_threads;
background_thread_enabled_set(tsdn, opt_background_thread);
if (malloc_mutex_init(&background_thread_lock,
"background_thread_global",
WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
malloc_mutex_rank_exclusive)) {
return true;
}
background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
base, opt_max_background_threads *
sizeof(background_thread_info_t), CACHELINE);
if (background_thread_info == NULL) {
return true;
}
for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
/* Thread mutex is rank_inclusive because of thread0. */
if (malloc_mutex_init(&info->mtx, "background_thread",
WITNESS_RANK_BACKGROUND_THREAD,
malloc_mutex_address_ordered)) {
return true;
}
if (pthread_cond_init(&info->cond, NULL)) {
return true;
}
malloc_mutex_lock(tsdn, &info->mtx);
info->state = background_thread_stopped;
background_thread_info_init(tsdn, info);
malloc_mutex_unlock(tsdn, &info->mtx);
}
#endif
return false;
}
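
The signal handling in background_thread_create_signals_masked() above follows a standard POSIX pattern: block every signal, call pthread_create() so the new thread inherits the fully blocked mask, then restore the caller's previous mask. A minimal standalone sketch of that pattern (the worker() routine and the create_thread_signals_masked() name here are illustrative, not part of jemalloc):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *worker(void *arg) {
	/* Starts with the creator's (fully blocked) signal mask. */
	(void)arg;
	return NULL;
}

/* Block all signals, create the thread, then restore the caller's mask. */
static int create_thread_signals_masked(pthread_t *thread) {
	sigset_t set, oldset;
	sigfillset(&set);
	int err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
	if (err != 0) {
		return err;
	}
	int create_err = pthread_create(thread, NULL, worker, NULL);
	/* Restore the original mask whether or not creation succeeded. */
	int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
	if (restore_err != 0) {
		fprintf(stderr, "failed to restore signal mask (%d)\n", restore_err);
	}
	return create_err;
}

int main(void) {
	pthread_t t;
	if (create_thread_signals_masked(&t) == 0) {
		pthread_join(t, NULL);
	}
	return 0;
}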

BeefRT/JEMalloc/src/base.c (new file, 529 lines)
@@ -0,0 +1,529 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"
/*
* In auto mode, arenas switch to huge pages for the base allocator on the
* second base block. a0 switches to thp on the 5th block (after 20 megabytes
* of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
*/
#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD_A0 5
/******************************************************************************/
/* Data. */
static base_t *b0;
metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
const char *metadata_thp_mode_names[] = {
"disabled",
"auto",
"always"
};
/******************************************************************************/
static inline bool
metadata_thp_madvise(void) {
return (metadata_thp_enabled() &&
(init_system_thp_mode == thp_mode_default));
}
static void *
base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
void *addr;
bool zero = true;
bool commit = true;
/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
assert(size == HUGEPAGE_CEILING(size));
size_t alignment = HUGEPAGE;
if (ehooks_are_default(ehooks)) {
addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
if (have_madvise_huge && addr) {
pages_set_thp_state(addr, size);
}
} else {
addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero,
&commit);
}
return addr;
}
static void
base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
size_t size) {
/*
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
* stopping at first success. This cascade is performed for consistency
* with the cascade in extent_dalloc_wrapper() because an application's
* custom hooks may not support e.g. dalloc. This function is only ever
* called as a side effect of arena destruction, so although it might
* seem pointless to do anything besides dalloc here, the application
* may in fact want the end state of all associated virtual memory to be
* in some consistent-but-allocated state.
*/
if (ehooks_are_default(ehooks)) {
if (!extent_dalloc_mmap(addr, size)) {
goto label_done;
}
if (!pages_decommit(addr, size)) {
goto label_done;
}
if (!pages_purge_forced(addr, size)) {
goto label_done;
}
if (!pages_purge_lazy(addr, size)) {
goto label_done;
}
/* Nothing worked. This should never happen. */
not_reached();
} else {
if (!ehooks_dalloc(tsdn, ehooks, addr, size, true)) {
goto label_done;
}
if (!ehooks_decommit(tsdn, ehooks, addr, size, 0, size)) {
goto label_done;
}
if (!ehooks_purge_forced(tsdn, ehooks, addr, size, 0, size)) {
goto label_done;
}
if (!ehooks_purge_lazy(tsdn, ehooks, addr, size, 0, size)) {
goto label_done;
}
/* Nothing worked. That's the application's problem. */
}
label_done:
if (metadata_thp_madvise()) {
/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
(size & HUGEPAGE_MASK) == 0);
pages_nohuge(addr, size);
}
}
static void
base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
edata_binit(edata, addr, size, sn);
}
static size_t
base_get_num_blocks(base_t *base, bool with_new_block) {
base_block_t *b = base->blocks;
assert(b != NULL);
size_t n_blocks = with_new_block ? 2 : 1;
while (b->next != NULL) {
n_blocks++;
b = b->next;
}
return n_blocks;
}
static void
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
assert(opt_metadata_thp == metadata_thp_auto);
malloc_mutex_assert_owner(tsdn, &base->mtx);
if (base->auto_thp_switched) {
return;
}
/* Called when adding a new block. */
bool should_switch;
if (base_ind_get(base) != 0) {
should_switch = (base_get_num_blocks(base, true) ==
BASE_AUTO_THP_THRESHOLD);
} else {
should_switch = (base_get_num_blocks(base, true) ==
BASE_AUTO_THP_THRESHOLD_A0);
}
if (!should_switch) {
return;
}
base->auto_thp_switched = true;
assert(!config_stats || base->n_thp == 0);
/* Make the initial blocks THP lazily. */
base_block_t *block = base->blocks;
while (block != NULL) {
assert((block->size & HUGEPAGE_MASK) == 0);
pages_huge(block, block->size);
if (config_stats) {
base->n_thp += HUGEPAGE_CEILING(block->size -
edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
}
}
static void *
base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
*gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
alignment) - (uintptr_t)edata_addr_get(edata);
ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
assert(edata_bsize_get(edata) >= *gap_size + size);
edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
*gap_size + size), edata_bsize_get(edata) - *gap_size - size,
edata_sn_get(edata));
return ret;
}
static void
base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
void *addr, size_t size) {
if (edata_bsize_get(edata) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
sz_size2index(edata_bsize_get(edata) + 1) - 1;
edata_heap_insert(&base->avail[index_floor], edata);
}
if (config_stats) {
base->allocated += size;
/*
* Add one PAGE to base_resident for every page boundary that is
* crossed by the new allocation. Adjust n_thp similarly when
* metadata_thp is enabled.
*/
base->resident += PAGE_CEILING((uintptr_t)addr + size) -
PAGE_CEILING((uintptr_t)addr - gap_size);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
if (metadata_thp_madvise() && (opt_metadata_thp ==
metadata_thp_always || base->auto_thp_switched)) {
base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
- HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
LG_HUGEPAGE;
assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
}
}
}
static void *
base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
size_t alignment) {
void *ret;
size_t gap_size;
ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
return ret;
}
/*
* Allocate a block of virtual memory that is large enough to start with a
* base_block_t header, followed by an object of specified size and alignment.
* On success a pointer to the initialized base_block_t header is returned.
*/
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
pszind_t *pind_last, size_t *extent_sn_next, size_t size,
size_t alignment) {
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t header_size = sizeof(base_block_t);
size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
header_size;
/*
* Create increasingly larger blocks in order to limit the total number
* of disjoint virtual memory ranges. Choose the next size in the page
* size class series (skipping size classes that are not a multiple of
* HUGEPAGE), or a size large enough to satisfy the requested size and
* alignment, whichever is larger.
*/
size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ usize));
pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
*pind_last + 1 : *pind_last;
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
block_size);
if (block == NULL) {
return NULL;
}
if (metadata_thp_madvise()) {
void *addr = (void *)block;
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
(block_size & HUGEPAGE_MASK) == 0);
if (opt_metadata_thp == metadata_thp_always) {
pages_huge(addr, block_size);
} else if (opt_metadata_thp == metadata_thp_auto &&
base != NULL) {
/* base != NULL indicates this is not a new base. */
malloc_mutex_lock(tsdn, &base->mtx);
base_auto_thp_switch(tsdn, base);
if (base->auto_thp_switched) {
pages_huge(addr, block_size);
}
malloc_mutex_unlock(tsdn, &base->mtx);
}
}
*pind_last = sz_psz2ind(block_size);
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
base_edata_init(extent_sn_next, &block->edata,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
/*
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
*/
static edata_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &base->mtx);
ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
/*
* Drop mutex during base_block_alloc(), because an extent hook will be
* called.
*/
malloc_mutex_unlock(tsdn, &base->mtx);
base_block_t *block = base_block_alloc(tsdn, base, ehooks,
base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
alignment);
malloc_mutex_lock(tsdn, &base->mtx);
if (block == NULL) {
return NULL;
}
block->next = base->blocks;
base->blocks = block;
if (config_stats) {
base->allocated += sizeof(base_block_t);
base->resident += PAGE_CEILING(sizeof(base_block_t));
base->mapped += block->size;
if (metadata_thp_madvise() &&
!(opt_metadata_thp == metadata_thp_auto
&& !base->auto_thp_switched)) {
assert(base->n_thp > 0);
base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
LG_HUGEPAGE;
}
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
return &block->edata;
}
base_t *
b0get(void) {
return b0;
}
base_t *
base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
bool metadata_use_hooks) {
pszind_t pind_last = 0;
size_t extent_sn_next = 0;
/*
* The base will contain the ehooks eventually, but it itself is
* allocated using them. So we use some stack ehooks to bootstrap its
* memory, and then initialize the ehooks within the base_t.
*/
ehooks_t fake_ehooks;
ehooks_init(&fake_ehooks, metadata_use_hooks ?
(extent_hooks_t *)extent_hooks :
(extent_hooks_t *)&ehooks_default_extent_hooks, ind);
base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
if (block == NULL) {
return NULL;
}
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
&gap_size, base_size, base_alignment);
ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind);
ehooks_init(&base->ehooks_base, metadata_use_hooks ?
(extent_hooks_t *)extent_hooks :
(extent_hooks_t *)&ehooks_default_extent_hooks, ind);
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
malloc_mutex_rank_exclusive)) {
base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
return NULL;
}
base->pind_last = pind_last;
base->extent_sn_next = extent_sn_next;
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < SC_NSIZES; i++) {
edata_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
base->resident = PAGE_CEILING(sizeof(base_block_t));
base->mapped = block->size;
base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
>> LG_HUGEPAGE : 0;
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
base_size);
return base;
}
void
base_delete(tsdn_t *tsdn, base_t *base) {
ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
base_block_t *next = base->blocks;
do {
base_block_t *block = next;
next = block->next;
base_unmap(tsdn, ehooks, base_ind_get(base), block,
block->size);
} while (next != NULL);
}
ehooks_t *
base_ehooks_get(base_t *base) {
return &base->ehooks;
}
ehooks_t *
base_ehooks_get_for_metadata(base_t *base) {
return &base->ehooks_base;
}
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
extent_hooks_t *old_extent_hooks =
ehooks_get_extent_hooks_ptr(&base->ehooks);
ehooks_init(&base->ehooks, extent_hooks, ehooks_ind_get(&base->ehooks));
return old_extent_hooks;
}
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t *esn) {
alignment = QUANTUM_CEILING(alignment);
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
edata_t *edata = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
edata = edata_heap_remove_first(&base->avail[i]);
if (edata != NULL) {
/* Use existing space. */
break;
}
}
if (edata == NULL) {
/* Try to allocate more space. */
edata = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
if (edata == NULL) {
ret = NULL;
goto label_return;
}
ret = base_extent_bump_alloc(base, edata, usize, alignment);
if (esn != NULL) {
*esn = (size_t)edata_sn_get(edata);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
return ret;
}
/*
* base_alloc() returns zeroed memory, which is always demand-zeroed for the
* auto arenas, in order to make multi-page sparse data structures such as radix
* tree nodes efficient with respect to physical memory usage. Upon success a
* pointer to at least size bytes with specified alignment is returned. Note
* that size is rounded up to the nearest multiple of alignment to avoid false
* sharing.
*/
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
edata_t *
base_alloc_edata(tsdn_t *tsdn, base_t *base) {
size_t esn;
edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
EDATA_ALIGNMENT, &esn);
if (edata == NULL) {
return NULL;
}
edata_esn_set(edata, esn);
return edata;
}
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
size_t *mapped, size_t *n_thp) {
cassert(config_stats);
malloc_mutex_lock(tsdn, &base->mtx);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
*allocated = base->allocated;
*resident = base->resident;
*mapped = base->mapped;
*n_thp = base->n_thp;
malloc_mutex_unlock(tsdn, &base->mtx);
}
void
base_prefork(tsdn_t *tsdn, base_t *base) {
malloc_mutex_prefork(tsdn, &base->mtx);
}
void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
malloc_mutex_postfork_parent(tsdn, &base->mtx);
}
void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
malloc_mutex_postfork_child(tsdn, &base->mtx);
}
bool
base_boot(tsdn_t *tsdn) {
b0 = base_new(tsdn, 0, (extent_hooks_t *)&ehooks_default_extent_hooks,
/* metadata_use_hooks */ true);
return (b0 == NULL);
}
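
base_extent_bump_alloc_helper() above hands out aligned objects by rounding the extent's start address up to the requested alignment and accounting for the skipped bytes as a gap. A small self-contained sketch of that rounding arithmetic, with align_ceiling() standing in for jemalloc's ALIGNMENT_CEILING macro and an invented address (alignment is assumed to be a power of two, as in the code above):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round x up to a multiple of alignment (a power of two). */
static uintptr_t align_ceiling(uintptr_t x, uintptr_t alignment) {
	return (x + alignment - 1) & ~(alignment - 1);
}

int main(void) {
	uintptr_t extent_addr = 0x1008;	/* hypothetical extent start */
	uintptr_t alignment = 64;	/* e.g. CACHELINE */
	uintptr_t ret = align_ceiling(extent_addr, alignment);
	uintptr_t gap = ret - extent_addr;	/* bytes skipped at the front */
	assert(ret % alignment == 0);
	printf("allocation at %#lx, gap of %lu bytes\n",
	    (unsigned long)ret, (unsigned long)gap);
	return 0;
}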

BeefRT/JEMalloc/src/bin.c (new file, 69 lines)
@@ -0,0 +1,69 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/witness.h"
bool
bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
size_t end_size, size_t nshards) {
if (nshards > BIN_SHARDS_MAX || nshards == 0) {
return true;
}
if (start_size > SC_SMALL_MAXCLASS) {
return false;
}
if (end_size > SC_SMALL_MAXCLASS) {
end_size = SC_SMALL_MAXCLASS;
}
/* Compute the index since this may happen before sz init. */
szind_t ind1 = sz_size2index_compute(start_size);
szind_t ind2 = sz_size2index_compute(end_size);
for (unsigned i = ind1; i <= ind2; i++) {
bin_shard_sizes[i] = (unsigned)nshards;
}
return false;
}
void
bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
/* Load the default number of shards. */
for (unsigned i = 0; i < SC_NBINS; i++) {
bin_shard_sizes[i] = N_BIN_SHARDS_DEFAULT;
}
}
bool
bin_init(bin_t *bin) {
if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
malloc_mutex_rank_exclusive)) {
return true;
}
bin->slabcur = NULL;
edata_heap_new(&bin->slabs_nonfull);
edata_list_active_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(bin_stats_t));
}
return false;
}
void
bin_prefork(tsdn_t *tsdn, bin_t *bin) {
malloc_mutex_prefork(tsdn, &bin->lock);
}
void
bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) {
malloc_mutex_postfork_parent(tsdn, &bin->lock);
}
void
bin_postfork_child(tsdn_t *tsdn, bin_t *bin) {
malloc_mutex_postfork_child(tsdn, &bin->lock);
}

BeefRT/JEMalloc/src/bin_info.c (new file, 30 lines)
@@ -0,0 +1,30 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/bin_info.h"
bin_info_t bin_infos[SC_NBINS];
static void
bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
bin_info_t infos[SC_NBINS]) {
for (unsigned i = 0; i < SC_NBINS; i++) {
bin_info_t *bin_info = &infos[i];
sc_t *sc = &sc_data->sc[i];
bin_info->reg_size = ((size_t)1U << sc->lg_base)
+ ((size_t)sc->ndelta << sc->lg_delta);
bin_info->slab_size = (sc->pgs << LG_PAGE);
bin_info->nregs =
(uint32_t)(bin_info->slab_size / bin_info->reg_size);
bin_info->n_shards = bin_shard_sizes[i];
bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
bin_info->nregs);
bin_info->bitmap_info = bitmap_info;
}
}
void
bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
assert(sc_data->initialized);
bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
}
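
bin_infos_init() above derives each bin's layout from its size-class parameters: reg_size = (1 << lg_base) + (ndelta << lg_delta), slab_size = pgs << LG_PAGE, and nregs = slab_size / reg_size. A worked example with made-up parameters (these values are not taken from sc_data; they only show the arithmetic):

#include <stdint.h>
#include <stdio.h>

int main(void) {
	/* Hypothetical size-class parameters. */
	unsigned lg_base = 5, ndelta = 3, lg_delta = 3;
	unsigned pgs = 1, lg_page = 12;	/* one 4 KiB page per slab */
	size_t reg_size = ((size_t)1 << lg_base) + ((size_t)ndelta << lg_delta);
	size_t slab_size = (size_t)pgs << lg_page;
	uint32_t nregs = (uint32_t)(slab_size / reg_size);
	/* 32 + 24 = 56-byte regions; 4096 / 56 = 73 regions per slab. */
	printf("reg_size=%zu slab_size=%zu nregs=%u\n",
	    reg_size, slab_size, (unsigned)nregs);
	return 0;
}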

BeefRT/JEMalloc/src/bitmap.c (new file, 120 lines)
@@ -0,0 +1,120 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
/******************************************************************************/
#ifdef BITMAP_USE_TREE
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
unsigned i;
size_t group_count;
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
/*
* Compute the number of groups necessary to store nbits bits, and
* progressively work upward through the levels until reaching a level
* that requires only one group.
*/
binfo->levels[0].group_offset = 0;
group_count = BITMAP_BITS2GROUPS(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
group_count = BITMAP_BITS2GROUPS(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
binfo->nlevels = i;
binfo->nbits = nbits;
}
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
return binfo->levels[binfo->nlevels].group_offset;
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
size_t extra;
unsigned i;
/*
* Bits are actually inverted with regard to the external bitmap
* interface.
*/
if (fill) {
/* The "filled" bitmap starts out with all 0 bits. */
memset(bitmap, 0, bitmap_size(binfo));
return;
}
/*
* The "empty" bitmap starts out with all 1 bits, except for trailing
* unused bits (if any). Note that each group uses bit 0 to correspond
* to the first logical bit in the group, so extra bits are the most
* significant bits of the last group.
*/
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0) {
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
}
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
if (extra != 0) {
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
}
}
}
#else /* BITMAP_USE_TREE */
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
binfo->nbits = nbits;
}
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
return binfo->ngroups;
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
size_t extra;
if (fill) {
memset(bitmap, 0, bitmap_size(binfo));
return;
}
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0) {
bitmap[binfo->ngroups - 1] >>= extra;
}
}
#endif /* BITMAP_USE_TREE */
size_t
bitmap_size(const bitmap_info_t *binfo) {
return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
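
bitmap_init() above stores bits inverted relative to the external interface (1 means free) and uses the computed extra shift to clear the unused high bits of the last group. A small sketch of that masking for a flat (non-tree) bitmap, assuming 64-bit groups and an invented width of 70 bits:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GROUP_NBITS 64

int main(void) {
	size_t nbits = 70;	/* hypothetical bitmap width */
	size_t ngroups = (nbits + GROUP_NBITS - 1) / GROUP_NBITS;	/* 2 */
	uint64_t groups[2];
	memset(groups, 0xff, sizeof(groups));	/* "empty" bitmap: all 1 bits */
	/* Shift off the bits of the last group that lie past nbits. */
	size_t extra = (GROUP_NBITS - (nbits % GROUP_NBITS)) % GROUP_NBITS;
	if (extra != 0) {
		groups[ngroups - 1] >>= extra;
	}
	/* With 70 bits, the last group keeps only its low 6 bits set (0x3f). */
	printf("last group = %#llx\n", (unsigned long long)groups[ngroups - 1]);
	return 0;
}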

BeefRT/JEMalloc/src/buf_writer.c (new file, 144 lines)
@@ -0,0 +1,144 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/malloc_io.h"
static void *
buf_writer_allocate_internal_buf(tsdn_t *tsdn, size_t buf_len) {
#ifdef JEMALLOC_JET
if (buf_len > SC_LARGE_MAXCLASS) {
return NULL;
}
#else
assert(buf_len <= SC_LARGE_MAXCLASS);
#endif
return iallocztm(tsdn, buf_len, sz_size2index(buf_len), false, NULL,
true, arena_get(tsdn, 0, false), true);
}
static void
buf_writer_free_internal_buf(tsdn_t *tsdn, void *buf) {
if (buf != NULL) {
idalloctm(tsdn, buf, NULL, NULL, true, true);
}
}
static void
buf_writer_assert(buf_writer_t *buf_writer) {
assert(buf_writer != NULL);
assert(buf_writer->write_cb != NULL);
if (buf_writer->buf != NULL) {
assert(buf_writer->buf_size > 0);
} else {
assert(buf_writer->buf_size == 0);
assert(buf_writer->internal_buf);
}
assert(buf_writer->buf_end <= buf_writer->buf_size);
}
bool
buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb,
void *cbopaque, char *buf, size_t buf_len) {
if (write_cb != NULL) {
buf_writer->write_cb = write_cb;
} else {
buf_writer->write_cb = je_malloc_message != NULL ?
je_malloc_message : wrtmessage;
}
buf_writer->cbopaque = cbopaque;
assert(buf_len >= 2);
if (buf != NULL) {
buf_writer->buf = buf;
buf_writer->internal_buf = false;
} else {
buf_writer->buf = buf_writer_allocate_internal_buf(tsdn,
buf_len);
buf_writer->internal_buf = true;
}
if (buf_writer->buf != NULL) {
buf_writer->buf_size = buf_len - 1; /* Allowing for '\0'. */
} else {
buf_writer->buf_size = 0;
}
buf_writer->buf_end = 0;
buf_writer_assert(buf_writer);
return buf_writer->buf == NULL;
}
void
buf_writer_flush(buf_writer_t *buf_writer) {
buf_writer_assert(buf_writer);
if (buf_writer->buf == NULL) {
return;
}
buf_writer->buf[buf_writer->buf_end] = '\0';
buf_writer->write_cb(buf_writer->cbopaque, buf_writer->buf);
buf_writer->buf_end = 0;
buf_writer_assert(buf_writer);
}
void
buf_writer_cb(void *buf_writer_arg, const char *s) {
buf_writer_t *buf_writer = (buf_writer_t *)buf_writer_arg;
buf_writer_assert(buf_writer);
if (buf_writer->buf == NULL) {
buf_writer->write_cb(buf_writer->cbopaque, s);
return;
}
size_t i, slen, n;
for (i = 0, slen = strlen(s); i < slen; i += n) {
if (buf_writer->buf_end == buf_writer->buf_size) {
buf_writer_flush(buf_writer);
}
size_t s_remain = slen - i;
size_t buf_remain = buf_writer->buf_size - buf_writer->buf_end;
n = s_remain < buf_remain ? s_remain : buf_remain;
memcpy(buf_writer->buf + buf_writer->buf_end, s + i, n);
buf_writer->buf_end += n;
buf_writer_assert(buf_writer);
}
assert(i == slen);
}
void
buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer) {
buf_writer_assert(buf_writer);
buf_writer_flush(buf_writer);
if (buf_writer->internal_buf) {
buf_writer_free_internal_buf(tsdn, buf_writer->buf);
}
}
void
buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
void *read_cbopaque) {
/*
* A tiny local buffer in case the buffered writer failed to allocate
* at init.
*/
static char backup_buf[16];
static buf_writer_t backup_buf_writer;
buf_writer_assert(buf_writer);
assert(read_cb != NULL);
if (buf_writer->buf == NULL) {
buf_writer_init(TSDN_NULL, &backup_buf_writer,
buf_writer->write_cb, buf_writer->cbopaque, backup_buf,
sizeof(backup_buf));
buf_writer = &backup_buf_writer;
}
assert(buf_writer->buf != NULL);
ssize_t nread = 0;
do {
buf_writer->buf_end += nread;
buf_writer_assert(buf_writer);
if (buf_writer->buf_end == buf_writer->buf_size) {
buf_writer_flush(buf_writer);
}
nread = read_cb(read_cbopaque,
buf_writer->buf + buf_writer->buf_end,
buf_writer->buf_size - buf_writer->buf_end);
} while (nread > 0);
buf_writer_flush(buf_writer);
}
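
buf_writer_cb() above batches output by copying the incoming string into the buffer in chunks and flushing through write_cb whenever the buffer fills. The same idea in a self-contained form (the tiny_writer type and its functions are illustrative, not jemalloc's API):

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 8

struct tiny_writer {
	char buf[BUF_SIZE + 1];	/* +1 for the terminating '\0' */
	size_t end;
	void (*write_cb)(const char *s);
};

static void tiny_flush(struct tiny_writer *w) {
	w->buf[w->end] = '\0';
	w->write_cb(w->buf);
	w->end = 0;
}

/* Append s, flushing whenever the buffer fills. */
static void tiny_write(struct tiny_writer *w, const char *s) {
	size_t slen = strlen(s);
	for (size_t i = 0, n = 0; i < slen; i += n) {
		if (w->end == BUF_SIZE) {
			tiny_flush(w);
		}
		size_t s_remain = slen - i;
		size_t buf_remain = BUF_SIZE - w->end;
		n = s_remain < buf_remain ? s_remain : buf_remain;
		memcpy(w->buf + w->end, s + i, n);
		w->end += n;
	}
}

static void print_cb(const char *s) {
	fputs(s, stdout);
}

int main(void) {
	struct tiny_writer w = { .end = 0, .write_cb = print_cb };
	tiny_write(&w, "hello, buffered world\n");
	tiny_flush(&w);	/* final flush, as buf_writer_terminate() does above */
	return 0;
}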

BeefRT/JEMalloc/src/cache_bin.c (new file, 99 lines)
@@ -0,0 +1,99 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/safety_check.h"
void
cache_bin_info_init(cache_bin_info_t *info,
cache_bin_sz_t ncached_max) {
assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
size_t stack_size = (size_t)ncached_max * sizeof(void *);
assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
info->ncached_max = (cache_bin_sz_t)ncached_max;
}
void
cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
size_t *size, size_t *alignment) {
/* For the total bin stack region (per tcache), reserve 2 more slots so
* that
* 1) the empty position can be safely read on the fast path before
* checking "is_empty"; and
* 2) the cur_ptr can go beyond the empty position by 1 step safely on
* the fast path (i.e. no overflow).
*/
*size = sizeof(void *) * 2;
for (szind_t i = 0; i < ninfos; i++) {
assert(infos[i].ncached_max > 0);
*size += infos[i].ncached_max * sizeof(void *);
}
/*
* Align to at least PAGE, to minimize the # of TLBs needed by the
* smaller sizes; also helps if the larger sizes don't get used at all.
*/
*alignment = PAGE;
}
void
cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
size_t *cur_offset) {
if (config_debug) {
size_t computed_size;
size_t computed_alignment;
/* Pointer should be as aligned as we asked for. */
cache_bin_info_compute_alloc(infos, ninfos, &computed_size,
&computed_alignment);
assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0);
}
*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
cache_bin_preceding_junk;
*cur_offset += sizeof(void *);
}
void
cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
size_t *cur_offset) {
*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
cache_bin_trailing_junk;
*cur_offset += sizeof(void *);
}
void
cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
size_t *cur_offset) {
/*
* The full_position points to the lowest available space. Allocations
* will access the slots toward higher addresses (for the benefit of
* adjacent prefetch).
*/
void *stack_cur = (void *)((uintptr_t)alloc + *cur_offset);
void *full_position = stack_cur;
uint16_t bin_stack_size = info->ncached_max * sizeof(void *);
*cur_offset += bin_stack_size;
void *empty_position = (void *)((uintptr_t)alloc + *cur_offset);
/* Init to the empty position. */
bin->stack_head = (void **)empty_position;
bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
bin->low_bits_full = (uint16_t)(uintptr_t)full_position;
bin->low_bits_empty = (uint16_t)(uintptr_t)empty_position;
cache_bin_sz_t free_spots = cache_bin_diff(bin,
bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head,
/* racy */ false);
assert(free_spots == bin_stack_size);
assert(cache_bin_ncached_get_local(bin, info) == 0);
assert(cache_bin_empty_position_get(bin) == empty_position);
assert(bin_stack_size > 0 || empty_position == full_position);
}
bool
cache_bin_still_zero_initialized(cache_bin_t *bin) {
return bin->stack_head == NULL;
}
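
cache_bin_init() above records only the low 16 bits of the full and empty positions; differences between those truncated values stay correct because each bin's stack fits within 64 KiB (cache_bin_info_init asserts stack_size < 2^16). A minimal demonstration of that truncated-difference property, with invented addresses and slot counts:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
	/* Hypothetical bin stack: 3 slots of pointer size. */
	uintptr_t full = 0xf010;	/* lowest address (full position) */
	uintptr_t empty = full + 3 * sizeof(void *);
	uintptr_t head = full + 1 * sizeof(void *);	/* one free slot below head */
	uint16_t low_full = (uint16_t)full;
	uint16_t low_head = (uint16_t)head;
	/* 16-bit subtraction yields the true byte distance as long as the
	 * region spans less than 64 KiB, even if the low bits wrap. */
	uint16_t diff = (uint16_t)(low_head - low_full);
	assert(diff == (uint16_t)(head - full));
	printf("free bytes between full position and head = %u\n", (unsigned)diff);
	printf("cached items between head and empty = %u\n",
	    (unsigned)((empty - head) / sizeof(void *)));
	return 0;
}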

BeefRT/JEMalloc/src/ckh.c (new file, 569 lines)
@@ -0,0 +1,569 @@
/*
*******************************************************************************
* Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
* hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
* functions are employed. The original cuckoo hashing algorithm was described
* in:
*
* Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
* 51(2):122-144.
*
* Generalization of cuckoo hashing was discussed in:
*
* Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
* alternative to traditional hash tables. In Proceedings of the 7th
* Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
* January 2006.
*
* This implementation uses precisely two hash functions because that is the
* fewest that can work, and supporting multiple hashes is an implementation
* burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
* that shows approximate expected maximum load factors for various
* configurations:
*
* | #cells/bucket |
* #hashes | 1 | 2 | 4 | 8 |
* --------+-------+-------+-------+-------+
* 1 | 0.006 | 0.006 | 0.03 | 0.12 |
* 2 | 0.49 | 0.86 |>0.93< |>0.96< |
* 3 | 0.91 | 0.97 | 0.98 | 0.999 |
* 4 | 0.97 | 0.99 | 0.999 | |
*
* The number of cells per bucket is chosen such that a bucket fits in one cache
* line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
* respectively.
*
******************************************************************************/
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
/******************************************************************************/
/*
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
static size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
return (bucket << LG_CKH_BUCKET_CELLS) + i;
}
}
return SIZE_T_MAX;
}
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
static size_t
ckh_isearch(ckh_t *ckh, const void *key) {
size_t hashes[2], bucket, cell;
assert(ckh != NULL);
ckh->hash(key, hashes);
/* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX) {
return cell;
}
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return cell;
}
static bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data) {
ckhc_t *cell;
unsigned offset, i;
/*
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
if (cell->key == NULL) {
cell->key = key;
cell->data = data;
ckh->count++;
return false;
}
}
return true;
}
/*
* No space is available in bucket. Randomly evict an item, then try to find an
* alternate location for that item. Iteratively repeat this
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
static bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata) {
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
unsigned i;
bucket = argbucket;
key = *argkey;
data = *argdata;
while (true) {
/*
* Choose a random item within the bucket to evict. This is
* critical to correct function, because without (eventually)
* evicting all items within a bucket during iteration, it
* would be possible to get stuck in an infinite loop if there
* were an item for which both hashes indicated the same
* bucket.
*/
i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
/* Swap cell->{key,data} and {key,data} (evict). */
tkey = cell->key; tdata = cell->data;
cell->key = key; cell->data = data;
key = tkey; data = tdata;
#ifdef CKH_COUNT
ckh->nrelocs++;
#endif
/* Find the alternate bucket for the evicted item. */
ckh->hash(key, hashes);
tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (tbucket == bucket) {
tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
- 1);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
* we are guaranteed to eventually escape this bucket
* during iteration, assuming pseudo-random item
* selection (true randomness would make infinite
* looping a remote possibility). The reason we can
* never get trapped forever is that there are two
* cases:
*
* 1) This bucket == argbucket, so we will quickly
* detect an eviction cycle and terminate.
* 2) An item was evicted to this bucket from another,
* which means that at least one item in this bucket
* has hashes that indicate distinct buckets.
*/
}
/* Check for a cycle. */
if (tbucket == argbucket) {
*argkey = key;
*argdata = data;
return true;
}
bucket = tbucket;
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return false;
}
}
}
static bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
ckh->hash(key, hashes);
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return false;
}
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return false;
}
/*
* Try to find a place for this item via iterative eviction/relocation.
*/
return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
}
/*
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
static bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
size_t count, i, nins;
const void *key, *data;
count = ckh->count;
ckh->count = 0;
for (i = nins = 0; nins < count; i++) {
if (aTab[i].key != NULL) {
key = aTab[i].key;
data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count;
return true;
}
nins++;
}
}
return false;
}
static bool
ckh_grow(tsd_t *tsd, ckh_t *ckh) {
bool ret;
ckhc_t *tab, *ttab;
unsigned lg_prevbuckets, lg_curcells;
#ifdef CKH_COUNT
ckh->ngrows++;
#endif
/*
* It is possible (though unlikely, given well behaved hashes) that the
* table will have to be doubled more than once in order to create a
* usable table.
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
while (true) {
size_t usize;
lg_curcells++;
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0
|| usize > SC_LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
true, NULL, true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
ret = true;
goto label_return;
}
/* Swap in new table. */
ttab = ckh->tab;
ckh->tab = tab;
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
ret = false;
label_return:
return ret;
}
static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
ckhc_t *tab, *ttab;
size_t usize;
unsigned lg_prevbuckets, lg_curcells;
/*
* It is possible (though unlikely, given well behaved hashes) that the
* table rebuild will fail.
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
return;
}
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
* prevent this or future operations from proceeding.
*/
return;
}
/* Swap in new table. */
ttab = ckh->tab;
ckh->tab = tab;
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
return;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
ckh->nshrinkfails++;
#endif
}
bool
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
ckh_keycomp_t *keycomp) {
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
assert(minitems > 0);
assert(ckh_hash != NULL);
assert(keycomp != NULL);
#ifdef CKH_COUNT
ckh->ngrows = 0;
ckh->nshrinks = 0;
ckh->nshrinkfails = 0;
ckh->ninserts = 0;
ckh->nrelocs = 0;
#endif
ckh->prng_state = 42; /* Value doesn't really matter. */
ckh->count = 0;
/*
* Find the minimum power of 2 that is large enough to fit minitems
* entries. We are using (2+,2) cuckoo hashing, which has an expected
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
* factor that will typically allow mincells items to fit without ever
* growing the table.
*/
assert(LG_CKH_BUCKET_CELLS > 0);
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
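/*
 * Editor's note (worked example, not in the original source): the formula
 * above sizes the table at roughly (4/3) * minitems cells, i.e. a target load
 * factor of 0.75. For minitems == 10 it yields ((10 + 2) / 3) << 2 == 16
 * cells; the loop below then rounds that up to the nearest power of two
 * (already 16 here), so the table starts at a load of 10/16 = 0.625.
 */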
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
lg_mincells++) {
/* Do nothing. */
}
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = ckh_hash;
ckh->keycomp = keycomp;
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
NULL, true, arena_ichoose(tsd, NULL));
if (ckh->tab == NULL) {
ret = true;
goto label_return;
}
ret = false;
label_return:
return ret;
}
void
ckh_delete(tsd_t *tsd, ckh_t *ckh) {
assert(ckh != NULL);
#ifdef CKH_VERBOSE
malloc_printf(
"%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
" nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
" nrelocs: %"FMTu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
(unsigned long long)ckh->ninserts,
(unsigned long long)ckh->nrelocs);
#endif
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
if (config_debug) {
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
}
}
size_t
ckh_count(ckh_t *ckh) {
assert(ckh != NULL);
return ckh->count;
}
bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
if (key != NULL) {
*key = (void *)ckh->tab[i].key;
}
if (data != NULL) {
*data = (void *)ckh->tab[i].data;
}
*tabind = i + 1;
return false;
}
}
return true;
}
bool
ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
bool ret;
assert(ckh != NULL);
assert(ckh_search(ckh, key, NULL, NULL));
#ifdef CKH_COUNT
ckh->ninserts++;
#endif
while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(tsd, ckh)) {
ret = true;
goto label_return;
}
}
ret = false;
label_return:
return ret;
}
bool
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
}
if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
}
ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */
ckh->count--;
/* Try to halve the table if it is less than 1/4 full. */
if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
ckh_shrink(tsd, ckh);
}
return false;
}
return true;
}
bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
}
if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
}
return false;
}
return true;
}
void
ckh_string_hash(const void *key, size_t r_hash[2]) {
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
ckh_string_keycomp(const void *k1, const void *k2) {
assert(k1 != NULL);
assert(k2 != NULL);
return !strcmp((char *)k1, (char *)k2);
}
void
ckh_pointer_hash(const void *key, size_t r_hash[2]) {
union {
const void *v;
size_t i;
} u;
assert(sizeof(u.v) == sizeof(u.i));
u.v = key;
hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}
bool
ckh_pointer_keycomp(const void *k1, const void *k2) {
return (k1 == k2);
}
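/*
 * Editor's sketch (not part of the original file): how the pieces above fit
 * together. The tsd_t argument is assumed to come from jemalloc's tsd_fetch();
 * ckh_insert/ckh_search/ckh_remove all return false on success.
 */
static void
ckh_usage_sketch(tsd_t *tsd) {
	ckh_t ckh;
	/* Expect ~16 string-keyed items, using the helpers defined above. */
	if (ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp)) {
		return; /* OOM. */
	}
	if (!ckh_insert(tsd, &ckh, "answer", (void *)(uintptr_t)42)) {
		void *k, *v;
		if (!ckh_search(&ckh, "answer", &k, &v)) {
			assert(v == (void *)(uintptr_t)42);
		}
		ckh_remove(tsd, &ckh, "answer", &k, &v);
	}
	ckh_delete(tsd, &ckh);
}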

30
BeefRT/JEMalloc/src/counter.c Normal file

@ -0,0 +1,30 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/counter.h"
bool
counter_accum_init(counter_accum_t *counter, uint64_t interval) {
if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum",
WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
return true;
}
locked_init_u64_unsynchronized(&counter->accumbytes, 0);
counter->interval = interval;
return false;
}
void
counter_prefork(tsdn_t *tsdn, counter_accum_t *counter) {
LOCKEDINT_MTX_PREFORK(tsdn, counter->mtx);
}
void
counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter) {
LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, counter->mtx);
}
void
counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter) {
LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, counter->mtx);
}

4414
BeefRT/JEMalloc/src/ctl.c Normal file

File diff suppressed because it is too large

295
BeefRT/JEMalloc/src/decay.c Normal file

@ -0,0 +1,295 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/decay.h"
static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
/*
* Generate a new deadline that is uniformly random within the next epoch after
* the current one.
*/
void
decay_deadline_init(decay_t *decay) {
nstime_copy(&decay->deadline, &decay->epoch);
nstime_add(&decay->deadline, &decay->interval);
if (decay_ms_read(decay) > 0) {
nstime_t jitter;
nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
nstime_ns(&decay->interval)));
nstime_add(&decay->deadline, &jitter);
}
}
void
decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
if (decay_ms > 0) {
nstime_init(&decay->interval, (uint64_t)decay_ms *
KQU(1000000));
nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
}
nstime_copy(&decay->epoch, cur_time);
decay->jitter_state = (uint64_t)(uintptr_t)decay;
decay_deadline_init(decay);
decay->nunpurged = 0;
memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
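/*
 * Editor's note (not in the original file): e.g. opt.dirty_decay_ms == 10000
 * gives an interval of 10,000 * 1,000,000 ns; assuming SMOOTHSTEP_NSTEPS ==
 * 200, that is divided into 200 epochs of 50 ms each, so the smoothstep curve
 * advances one step every 50 ms.
 */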
bool
decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
if (config_debug) {
for (size_t i = 0; i < sizeof(decay_t); i++) {
assert(((char *)decay)[i] == 0);
}
decay->ceil_npages = 0;
}
if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
malloc_mutex_rank_exclusive)) {
return true;
}
decay->purging = false;
decay_reinit(decay, cur_time, decay_ms);
return false;
}
bool
decay_ms_valid(ssize_t decay_ms) {
if (decay_ms < -1) {
return false;
}
if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
KQU(1000)) {
return true;
}
return false;
}
static void
decay_maybe_update_time(decay_t *decay, nstime_t *new_time) {
if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch,
new_time) > 0)) {
/*
* Time went backwards. Move the epoch back in time and
* generate a new deadline, with the expectation that time
* typically flows forward for long enough periods of time that
* epochs complete. Unfortunately, this strategy is susceptible
* to clock jitter triggering premature epoch advances, but
* clock jitter estimation and compensation isn't feasible here
* because calls into this code are event-driven.
*/
nstime_copy(&decay->epoch, new_time);
decay_deadline_init(decay);
} else {
/* Verify that time does not go backwards. */
assert(nstime_compare(&decay->epoch, new_time) <= 0);
}
}
static size_t
decay_backlog_npages_limit(const decay_t *decay) {
/*
* For each element of decay_backlog, multiply by the corresponding
* fixed-point smoothstep decay factor. Sum the products, then divide
* to round down to the nearest whole number of pages.
*/
uint64_t sum = 0;
for (unsigned i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] * h_steps[i];
}
size_t npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return npages_limit_backlog;
}
/*
* Update backlog, assuming that 'nadvance_u64' time intervals have passed.
* Trailing 'nadvance_u64' records should be erased and 'current_npages' is
* placed as the newest record.
*/
static void
decay_backlog_update(decay_t *decay, uint64_t nadvance_u64,
size_t current_npages) {
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
size_t nadvance_z = (size_t)nadvance_u64;
assert((uint64_t)nadvance_z == nadvance_u64);
memmove(decay->backlog, &decay->backlog[nadvance_z],
(SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
if (nadvance_z > 1) {
memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
size_t npages_delta = (current_npages > decay->nunpurged) ?
current_npages - decay->nunpurged : 0;
decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
if (config_debug) {
if (current_npages > decay->ceil_npages) {
decay->ceil_npages = current_npages;
}
size_t npages_limit = decay_backlog_npages_limit(decay);
assert(decay->ceil_npages >= npages_limit);
if (decay->ceil_npages > npages_limit) {
decay->ceil_npages = npages_limit;
}
}
}
static inline bool
decay_deadline_reached(const decay_t *decay, const nstime_t *time) {
return (nstime_compare(&decay->deadline, time) <= 0);
}
uint64_t
decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) {
uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
size_t n_epoch = (size_t)(nstime_ns(time) / decay_interval_ns);
uint64_t npages_purge;
if (n_epoch >= SMOOTHSTEP_NSTEPS) {
npages_purge = npages_new;
} else {
uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
assert(h_steps_max >=
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npages_purge = npages_new * (h_steps_max -
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npages_purge >>= SMOOTHSTEP_BFP;
}
return npages_purge;
}
bool
decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
size_t npages_current) {
/* Handle possible non-monotonicity of time. */
decay_maybe_update_time(decay, new_time);
if (!decay_deadline_reached(decay, new_time)) {
return false;
}
nstime_t delta;
nstime_copy(&delta, new_time);
nstime_subtract(&delta, &decay->epoch);
uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
assert(nadvance_u64 > 0);
/* Add nadvance_u64 decay intervals to epoch. */
nstime_copy(&delta, &decay->interval);
nstime_imultiply(&delta, nadvance_u64);
nstime_add(&decay->epoch, &delta);
/* Set a new deadline. */
decay_deadline_init(decay);
/* Update the backlog. */
decay_backlog_update(decay, nadvance_u64, npages_current);
decay->npages_limit = decay_backlog_npages_limit(decay);
decay->nunpurged = (decay->npages_limit > npages_current) ?
decay->npages_limit : npages_current;
return true;
}
/*
* Calculate how many pages should be purged after 'interval'.
*
* First, calculate how many pages should remain at the moment, then subtract
* the number of pages that should remain after 'interval'. The difference is
* how many pages should be purged until then.
*
* The number of pages that should remain at a specific moment is calculated
* like this: pages(now) = sum(backlog[i] * h_steps[i]). After 'interval'
* passes, backlog would shift 'interval' positions to the left and sigmoid
* curve would be applied starting with backlog[interval].
*
* The implementation doesn't directly map to the description, but it's
* essentially the same calculation, optimized to avoid iterating over
* [interval..SMOOTHSTEP_NSTEPS) twice.
*/
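/*
 * Editor's note (not in the original file): written out, the loop below
 * computes
 *   npurge(interval) = (sum_{i < interval} backlog[i] * h_steps[i]
 *       + sum_{i >= interval} backlog[i] * (h_steps[i] - h_steps[i - interval]))
 *           >> SMOOTHSTEP_BFP,
 * which equals pages(now) - pages(now + interval) under the assumption that no
 * new dirty pages are added in the meantime.
 */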
static inline size_t
decay_npurge_after_interval(decay_t *decay, size_t interval) {
size_t i;
uint64_t sum = 0;
for (i = 0; i < interval; i++) {
sum += decay->backlog[i] * h_steps[i];
}
for (; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] *
(h_steps[i] - h_steps[i - interval]);
}
return (size_t)(sum >> SMOOTHSTEP_BFP);
}
uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
uint64_t npages_threshold) {
if (!decay_gradually(decay)) {
return DECAY_UNBOUNDED_TIME_TO_PURGE;
}
uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
assert(decay_interval_ns > 0);
if (npages_current == 0) {
unsigned i;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
if (decay->backlog[i] > 0) {
break;
}
}
if (i == SMOOTHSTEP_NSTEPS) {
/* No dirty pages recorded. Sleep indefinitely. */
return DECAY_UNBOUNDED_TIME_TO_PURGE;
}
}
if (npages_current <= npages_threshold) {
/* Use max interval. */
return decay_interval_ns * SMOOTHSTEP_NSTEPS;
}
/* Minimal 2 intervals to ensure reaching next epoch deadline. */
size_t lb = 2;
size_t ub = SMOOTHSTEP_NSTEPS;
size_t npurge_lb, npurge_ub;
npurge_lb = decay_npurge_after_interval(decay, lb);
if (npurge_lb > npages_threshold) {
return decay_interval_ns * lb;
}
npurge_ub = decay_npurge_after_interval(decay, ub);
if (npurge_ub < npages_threshold) {
return decay_interval_ns * ub;
}
unsigned n_search = 0;
size_t target, npurge;
while ((npurge_lb + npages_threshold < npurge_ub) && (lb + 2 < ub)) {
target = (lb + ub) / 2;
npurge = decay_npurge_after_interval(decay, target);
if (npurge > npages_threshold) {
ub = target;
npurge_ub = npurge;
} else {
lb = target;
npurge_lb = npurge;
}
assert(n_search < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
++n_search;
}
return decay_interval_ns * (ub + lb) / 2;
}
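/*
 * Editor's sketch (not part of the original file): the rough calling pattern
 * for this module. The purge step and npages_dirty bookkeeping are
 * placeholders for the caller's (e.g. the page allocator's) own logic, and in
 * jemalloc the caller holds decay->mtx around the epoch advance.
 */
static void
decay_usage_sketch(decay_t *decay, size_t npages_dirty) {
	nstime_t now;
	nstime_init_update(&now);
	if (decay_maybe_advance_epoch(decay, &now, npages_dirty)) {
		/*
		 * A new epoch started: purge down to decay->npages_limit. A
		 * background thread would instead sleep for
		 * decay_ns_until_purge(decay, npages_dirty, 0) nanoseconds
		 * before re-checking.
		 */
	}
}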

55
BeefRT/JEMalloc/src/div.c Normal file

@ -0,0 +1,55 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/assert.h"
/*
* Suppose we have n = q * d, all integers. We know n and d, and want q = n / d.
*
* For any k, we have (here, all division is exact; not C-style rounding):
* floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where
* r = (-2^k) mod d.
*
* Expanding this out:
* ... = floor(2^k / d * n / 2^k + r / d * n / 2^k)
* = floor(n / d + (r / d) * (n / 2^k)).
*
* The fractional part of n / d is 0 (because of the assumption that d divides n
* exactly), so we have:
* ... = n / d + floor((r / d) * (n / 2^k))
*
* So that our initial expression is equal to the quantity we seek, so long as
* (r / d) * (n / 2^k) < 1.
*
* r is a remainder mod d, so r < d and r / d < 1 always. We can make
* n / 2^k < 1 by setting k = 32. This gets us a value of magic that works.
*/
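/*
 * Worked example (editor's note, not in the original file): for d == 7,
 * magic == ceil(2^32 / 7) == 613566757. Then for n == 21 (a multiple of 7),
 * (21 * 613566757) >> 32 == 3 == 21 / 7, as required.
 */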
void
div_init(div_info_t *div_info, size_t d) {
/* Nonsensical. */
assert(d != 0);
/*
* This would make the value of magic too high to fit into a uint32_t
* (we would want magic = 2^32 exactly). This would mess with code gen
* on 32-bit machines.
*/
assert(d != 1);
uint64_t two_to_k = ((uint64_t)1 << 32);
uint32_t magic = (uint32_t)(two_to_k / d);
/*
* We want magic = ceil(2^k / d), but C gives us floor. We have to
* increment it unless the result was exact (i.e. unless d is a power of
* two).
*/
if (two_to_k % d != 0) {
magic++;
}
div_info->magic = magic;
#ifdef JEMALLOC_DEBUG
div_info->d = d;
#endif
}

35
BeefRT/JEMalloc/src/ecache.c Normal file

@ -0,0 +1,35 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/san.h"
bool
ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
bool delay_coalesce) {
if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS,
malloc_mutex_rank_exclusive)) {
return true;
}
ecache->state = state;
ecache->ind = ind;
ecache->delay_coalesce = delay_coalesce;
eset_init(&ecache->eset, state);
eset_init(&ecache->guarded_eset, state);
return false;
}
void
ecache_prefork(tsdn_t *tsdn, ecache_t *ecache) {
malloc_mutex_prefork(tsdn, &ecache->mtx);
}
void
ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache) {
malloc_mutex_postfork_parent(tsdn, &ecache->mtx);
}
void
ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache) {
malloc_mutex_postfork_child(tsdn, &ecache->mtx);
}

6
BeefRT/JEMalloc/src/edata.c Normal file

@ -0,0 +1,6 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
ph_gen(, edata_avail, edata_t, avail_link,
edata_esnead_comp)
ph_gen(, edata_heap, edata_t, heap_link, edata_snad_comp)

154
BeefRT/JEMalloc/src/edata_cache.c Normal file

@ -0,0 +1,154 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
bool
edata_cache_init(edata_cache_t *edata_cache, base_t *base) {
edata_avail_new(&edata_cache->avail);
/*
* This is not strictly necessary, since the edata_cache_t is only
* created inside an arena, which is zeroed on creation. But this is
* handy as a safety measure.
*/
atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);
if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",
WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
return true;
}
edata_cache->base = base;
return false;
}
edata_t *
edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_lock(tsdn, &edata_cache->mtx);
edata_t *edata = edata_avail_first(&edata_cache->avail);
if (edata == NULL) {
malloc_mutex_unlock(tsdn, &edata_cache->mtx);
return base_alloc_edata(tsdn, edata_cache->base);
}
edata_avail_remove(&edata_cache->avail, edata);
atomic_load_sub_store_zu(&edata_cache->count, 1);
malloc_mutex_unlock(tsdn, &edata_cache->mtx);
return edata;
}
void
edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
malloc_mutex_lock(tsdn, &edata_cache->mtx);
edata_avail_insert(&edata_cache->avail, edata);
atomic_load_add_store_zu(&edata_cache->count, 1);
malloc_mutex_unlock(tsdn, &edata_cache->mtx);
}
void
edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_prefork(tsdn, &edata_cache->mtx);
}
void
edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_postfork_parent(tsdn, &edata_cache->mtx);
}
void
edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_postfork_child(tsdn, &edata_cache->mtx);
}
void
edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {
edata_list_inactive_init(&ecs->list);
ecs->fallback = fallback;
ecs->disabled = false;
}
static void
edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
edata_cache_fast_t *ecs) {
edata_t *edata;
malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
edata = edata_avail_remove_first(&ecs->fallback->avail);
if (edata == NULL) {
break;
}
edata_list_inactive_append(&ecs->list, edata);
atomic_load_sub_store_zu(&ecs->fallback->count, 1);
}
malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
}
edata_t *
edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_EDATA_CACHE, 0);
if (ecs->disabled) {
assert(edata_list_inactive_first(&ecs->list) == NULL);
return edata_cache_get(tsdn, ecs->fallback);
}
edata_t *edata = edata_list_inactive_first(&ecs->list);
if (edata != NULL) {
edata_list_inactive_remove(&ecs->list, edata);
return edata;
}
/* Slow path; requires synchronization. */
edata_cache_fast_try_fill_from_fallback(tsdn, ecs);
edata = edata_list_inactive_first(&ecs->list);
if (edata != NULL) {
edata_list_inactive_remove(&ecs->list, edata);
} else {
/*
* Slowest path (fallback was also empty); allocate something
* new.
*/
edata = base_alloc_edata(tsdn, ecs->fallback->base);
}
return edata;
}
static void
edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
/*
* You could imagine smarter cache management policies (like
* only flushing down to some threshold in anticipation of
* future get requests). But just flushing everything provides
* a good opportunity to defrag too, and lets us share code between the
* flush and disable pathways.
*/
edata_t *edata;
size_t nflushed = 0;
malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) {
edata_list_inactive_remove(&ecs->list, edata);
edata_avail_insert(&ecs->fallback->avail, edata);
nflushed++;
}
atomic_load_add_store_zu(&ecs->fallback->count, nflushed);
malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
}
void
edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_EDATA_CACHE, 0);
if (ecs->disabled) {
assert(edata_list_inactive_first(&ecs->list) == NULL);
edata_cache_put(tsdn, ecs->fallback, edata);
return;
}
/*
* Prepend rather than append, to do LIFO ordering in the hopes of some
* cache locality.
*/
edata_list_inactive_prepend(&ecs->list, edata);
}
void
edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
edata_cache_fast_flush_all(tsdn, ecs);
ecs->disabled = true;
}

275
BeefRT/JEMalloc/src/ehooks.c Normal file

@ -0,0 +1,275 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/extent_mmap.h"
void
ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind) {
/* All other hooks are optional; this one is not. */
assert(extent_hooks->alloc != NULL);
ehooks->ind = ind;
ehooks_set_extent_hooks_ptr(ehooks, extent_hooks);
}
/*
* If the caller specifies (!*zero), it is still possible to receive zeroed
* memory, in which case *zero is toggled to true. arena_extent_alloc() takes
* advantage of this to avoid demanding zeroed extents, but taking advantage of
* them if they are returned.
*/
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
void *ret;
assert(size != 0);
assert(alignment != 0);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* mmap. */
if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
!= NULL) {
return ret;
}
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* All strategies for allocation failed. */
return NULL;
}
void *
ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
arena_t *arena = arena_get(tsdn, arena_ind, false);
/* NULL arena indicates arena_create. */
assert(arena != NULL || alignment == HUGEPAGE);
dss_prec_t dss = (arena == NULL) ? dss_prec_disabled :
(dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
zero, commit, dss);
if (have_madvise_huge && ret) {
pages_set_thp_state(ret, size);
}
return ret;
}
static void *
ehooks_default_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size,
ALIGNMENT_CEILING(alignment, PAGE), zero, commit, arena_ind);
}
bool
ehooks_default_dalloc_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
return extent_dalloc_mmap(addr, size);
}
return true;
}
static bool
ehooks_default_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
return ehooks_default_dalloc_impl(addr, size);
}
void
ehooks_default_destroy_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
pages_unmap(addr, size);
}
}
static void
ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
ehooks_default_destroy_impl(addr, size);
}
bool
ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
static bool
ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
return ehooks_default_commit_impl(addr, offset, length);
}
bool
ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
static bool
ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
return ehooks_default_decommit_impl(addr, offset, length);
}
#ifdef PAGES_CAN_PURGE_LAZY
bool
ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
static bool
ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return ehooks_default_purge_lazy_impl(addr, offset, length);
}
#endif
#ifdef PAGES_CAN_PURGE_FORCED
bool
ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
return pages_purge_forced((void *)((uintptr_t)addr +
(uintptr_t)offset), length);
}
static bool
ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind) {
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return ehooks_default_purge_forced_impl(addr, offset, length);
}
#endif
bool
ehooks_default_split_impl() {
if (!maps_coalesce) {
/*
* Without retain, only whole regions can be purged (required by
* MEM_RELEASE on Windows) -- therefore disallow splitting. See
* comments in extent_head_no_merge().
*/
return !opt_retain;
}
return false;
}
static bool
ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
return ehooks_default_split_impl();
}
bool
ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) {
assert(addr_a < addr_b);
/*
* For non-DSS cases --
* a) W/o maps_coalesce, merge is not always allowed (Windows):
* 1) w/o retain, never merge (first branch below).
* 2) with retain, only merge extents from the same VirtualAlloc
* region (in which case MEM_DECOMMIT is utilized for purging).
*
* b) With maps_coalesce, it's always possible to merge.
* 1) w/o retain, always allow merge (only about dirty / muzzy).
* 2) with retain, to preserve the SN / first-fit, merge is still
* disallowed if b is a head extent, i.e. no merging across
* different mmap regions.
*
* a2) and b2) are implemented in emap_try_acquire_edata_neighbor, and
* sanity checked in the second branch below.
*/
if (!maps_coalesce && !opt_retain) {
return true;
}
if (config_debug) {
edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global,
addr_a);
bool head_a = edata_is_head_get(a);
edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global,
addr_b);
bool head_b = edata_is_head_get(b);
emap_assert_mapped(tsdn, &arena_emap_global, a);
emap_assert_mapped(tsdn, &arena_emap_global, b);
assert(extent_neighbor_head_state_mergeable(head_a, head_b,
/* forward */ true));
}
if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
return true;
}
return false;
}
bool
ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
tsdn_t *tsdn = tsdn_fetch();
return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
}
void
ehooks_default_zero_impl(void *addr, size_t size) {
/*
* By default, we try to zero out memory using OS-provided demand-zeroed
* pages. If the user has specifically requested hugepages, though, we
* don't want to purge in the middle of a hugepage (which would break it
* up), so we act conservatively and use memset.
*/
bool needs_memset = true;
if (opt_thp != thp_mode_always) {
needs_memset = pages_purge_forced(addr, size);
}
if (needs_memset) {
memset(addr, 0, size);
}
}
void
ehooks_default_guard_impl(void *guard1, void *guard2) {
pages_mark_guards(guard1, guard2);
}
void
ehooks_default_unguard_impl(void *guard1, void *guard2) {
pages_unmark_guards(guard1, guard2);
}
const extent_hooks_t ehooks_default_extent_hooks = {
ehooks_default_alloc,
ehooks_default_dalloc,
ehooks_default_destroy,
ehooks_default_commit,
ehooks_default_decommit,
#ifdef PAGES_CAN_PURGE_LAZY
ehooks_default_purge_lazy,
#else
NULL,
#endif
#ifdef PAGES_CAN_PURGE_FORCED
ehooks_default_purge_forced,
#else
NULL,
#endif
ehooks_default_split,
ehooks_default_merge
};
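/*
 * Editor's sketch (not part of the original file): applications can replace
 * this default table through the documented "arena.<i>.extent_hooks" mallctl.
 * The wrapper below is a minimal illustration that forwards to the previously
 * installed hooks while observing allocations; error handling is omitted and
 * the hook table must outlive the arena.
 */
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static extent_hooks_t *orig_hooks;
static extent_hooks_t observed_hooks;

static void *
observed_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	/* Delegate to the hooks that were installed before ours. */
	return orig_hooks->alloc(orig_hooks, new_addr, size, alignment, zero,
	    commit, arena_ind);
}

static void
install_observed_hooks(void) {
	size_t sz = sizeof(orig_hooks);
	/* Read arena 0's current hooks, then install a copy that wraps alloc. */
	mallctl("arena.0.extent_hooks", &orig_hooks, &sz, NULL, 0);
	observed_hooks = *orig_hooks;
	observed_hooks.alloc = observed_alloc;
	extent_hooks_t *new_hooks = &observed_hooks;
	mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&new_hooks,
	    sizeof(new_hooks));
}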

386
BeefRT/JEMalloc/src/emap.c Normal file

@ -0,0 +1,386 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/emap.h"
enum emap_lock_result_e {
emap_lock_result_success,
emap_lock_result_failure,
emap_lock_result_no_extent
};
typedef enum emap_lock_result_e emap_lock_result_t;
bool
emap_init(emap_t *emap, base_t *base, bool zeroed) {
return rtree_new(&emap->rtree, base, zeroed);
}
void
emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t state) {
witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE);
edata_state_set(edata, state);
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm1 = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
/* init_missing */ false);
assert(elm1 != NULL);
rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL :
rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_last_get(edata), /* dependent */ true,
/* init_missing */ false);
rtree_leaf_elm_state_update(tsdn, &emap->rtree, elm1, elm2, state);
emap_assert_mapped(tsdn, emap, edata);
}
static inline edata_t *
emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_pai_t pai, extent_state_t expected_state, bool forward,
bool expanding) {
witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE);
assert(!edata_guarded_get(edata));
assert(!expanding || forward);
assert(!edata_state_in_transition(expected_state));
assert(expected_state == extent_state_dirty ||
expected_state == extent_state_muzzy ||
expected_state == extent_state_retained);
void *neighbor_addr = forward ? edata_past_get(edata) :
edata_before_get(edata);
/*
* This is subtle; the rtree code asserts that its input pointer is
* non-NULL, and this is a useful thing to check. But it's possible
* that edata corresponds to an address of (void *)PAGE (in practice,
* this has only been observed on FreeBSD when address-space
* randomization is on, but it could in principle happen anywhere). In
* this case, edata_before_get(edata) is NULL, triggering the assert.
*/
if (neighbor_addr == NULL) {
return NULL;
}
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)neighbor_addr, /* dependent*/ false,
/* init_missing */ false);
if (elm == NULL) {
return NULL;
}
rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn,
&emap->rtree, elm, /* dependent */ true);
if (!extent_can_acquire_neighbor(edata, neighbor_contents, pai,
expected_state, forward, expanding)) {
return NULL;
}
/* From this point, the neighbor edata can be safely acquired. */
edata_t *neighbor = neighbor_contents.edata;
assert(edata_state_get(neighbor) == expected_state);
emap_update_edata_state(tsdn, emap, neighbor, extent_state_merging);
if (expanding) {
extent_assert_can_expand(edata, neighbor);
} else {
extent_assert_can_coalesce(edata, neighbor);
}
return neighbor;
}
edata_t *
emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_pai_t pai, extent_state_t expected_state, bool forward) {
return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
expected_state, forward, /* expand */ false);
}
edata_t *
emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
edata_t *edata, extent_pai_t pai, extent_state_t expected_state) {
/* Try expanding forward. */
return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
expected_state, /* forward */ true, /* expand */ true);
}
void
emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t new_state) {
assert(emap_edata_in_transition(tsdn, emap, edata));
assert(emap_edata_is_acquired(tsdn, emap, edata));
emap_update_edata_state(tsdn, emap, edata, new_state);
}
static bool
emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
const edata_t *edata, bool dependent, bool init_missing,
rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
*r_elm_a = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata), dependent, init_missing);
if (!dependent && *r_elm_a == NULL) {
return true;
}
assert(*r_elm_a != NULL);
*r_elm_b = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_last_get(edata), dependent, init_missing);
if (!dependent && *r_elm_b == NULL) {
return true;
}
assert(*r_elm_b != NULL);
return false;
}
static void
emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a,
rtree_leaf_elm_t *elm_b, edata_t *edata, szind_t szind, bool slab) {
rtree_contents_t contents;
contents.edata = edata;
contents.metadata.szind = szind;
contents.metadata.slab = slab;
contents.metadata.is_head = (edata == NULL) ? false :
edata_is_head_get(edata);
contents.metadata.state = (edata == NULL) ? 0 : edata_state_get(edata);
rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents);
if (elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree, elm_b, contents);
}
}
bool
emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind, bool slab) {
assert(edata_state_get(edata) == extent_state_active);
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm_a, *elm_b;
bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
false, true, &elm_a, &elm_b);
if (err) {
return true;
}
assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
/* dependent */ false).edata == NULL);
assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
/* dependent */ false).edata == NULL);
emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, edata, szind, slab);
return false;
}
/* Invoked *after* emap_register_boundary. */
void
emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind) {
EMAP_DECLARE_RTREE_CTX;
assert(edata_slab_get(edata));
assert(edata_state_get(edata) == extent_state_active);
if (config_debug) {
/* Making sure the boundary is registered already. */
rtree_leaf_elm_t *elm_a, *elm_b;
bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx,
edata, /* dependent */ true, /* init_missing */ false,
&elm_a, &elm_b);
assert(!err);
rtree_contents_t contents_a, contents_b;
contents_a = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
/* dependent */ true);
contents_b = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
/* dependent */ true);
assert(contents_a.edata == edata && contents_b.edata == edata);
assert(contents_a.metadata.slab && contents_b.metadata.slab);
}
rtree_contents_t contents;
contents.edata = edata;
contents.metadata.szind = szind;
contents.metadata.slab = true;
contents.metadata.state = extent_state_active;
contents.metadata.is_head = false; /* Not allowed to access. */
assert(edata_size_get(edata) > (2 << LG_PAGE));
rtree_write_range(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata) + PAGE,
(uintptr_t)edata_last_get(edata) - PAGE, contents);
}
void
emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
/*
* The edata must be either in an acquired state, or protected by state
* based locks.
*/
if (!emap_edata_is_acquired(tsdn, emap, edata)) {
witness_assert_positive_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
}
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm_a, *elm_b;
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
true, false, &elm_a, &elm_b);
emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES,
false);
}
void
emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
EMAP_DECLARE_RTREE_CTX;
assert(edata_slab_get(edata));
if (edata_size_get(edata) > (2 << LG_PAGE)) {
rtree_clear_range(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata) + PAGE,
(uintptr_t)edata_last_get(edata) - PAGE);
}
}
void
emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
bool slab) {
EMAP_DECLARE_RTREE_CTX;
if (szind != SC_NSIZES) {
rtree_contents_t contents;
contents.edata = edata;
contents.metadata.szind = szind;
contents.metadata.slab = slab;
contents.metadata.is_head = edata_is_head_get(edata);
contents.metadata.state = edata_state_get(edata);
rtree_write(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_addr_get(edata), contents);
/*
* Recall that this is called only for active->inactive and
* inactive->active transitions (since only active extents have
* meaningful values for szind and slab). Active, non-slab
* extents only need to handle lookups at their head (on
* deallocation), so we don't bother filling in the end
* boundary.
*
* For slab extents, we do the end-mapping change. This still
* leaves the interior unmodified; an emap_register_interior
* call is coming in those cases, though.
*/
if (slab && edata_size_get(edata) > PAGE) {
uintptr_t key = (uintptr_t)edata_past_get(edata)
- (uintptr_t)PAGE;
rtree_write(tsdn, &emap->rtree, rtree_ctx, key,
contents);
}
}
}
bool
emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *edata, size_t size_a, edata_t *trail, size_t size_b) {
EMAP_DECLARE_RTREE_CTX;
/*
* We use incorrect constants for things like arena ind, zero, ranged,
* and commit state, and head status. This is a fake edata_t, used to
* facilitate a lookup.
*/
edata_t lead = {0};
edata_init(&lead, 0U, edata_addr_get(edata), size_a, false, 0, 0,
extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, &lead, false, true,
&prepare->lead_elm_a, &prepare->lead_elm_b);
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, false, true,
&prepare->trail_elm_a, &prepare->trail_elm_b);
if (prepare->lead_elm_a == NULL || prepare->lead_elm_b == NULL
|| prepare->trail_elm_a == NULL || prepare->trail_elm_b == NULL) {
return true;
}
return false;
}
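/*
 * Editor's note (not in the original file): split is two-phase. This prepare
 * step only performs rtree leaf lookups (with init_missing), so it is the only
 * part that can fail; emap_split_commit below then just writes the
 * already-acquired leaf elements and cannot fail, letting callers back out
 * before any mapping has been mutated.
 */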
void
emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, size_t size_a, edata_t *trail, size_t size_b) {
/*
* We should think about not writing to the lead leaf element. We can
* get into situations where a racing realloc-like call can disagree
* with a size lookup request. I think it's fine to declare that these
* situations are race bugs, but there's an argument to be made that for
* things like xallocx, a size lookup call should return either the old
* size or the new size, but not anything else.
*/
emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a,
prepare->lead_elm_b, lead, SC_NSIZES, /* slab */ false);
emap_rtree_write_acquired(tsdn, emap, prepare->trail_elm_a,
prepare->trail_elm_b, trail, SC_NSIZES, /* slab */ false);
}
void
emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, edata_t *trail) {
EMAP_DECLARE_RTREE_CTX;
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, lead, true, false,
&prepare->lead_elm_a, &prepare->lead_elm_b);
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, true, false,
&prepare->trail_elm_a, &prepare->trail_elm_b);
}
void
emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, edata_t *trail) {
rtree_contents_t clear_contents;
clear_contents.edata = NULL;
clear_contents.metadata.szind = SC_NSIZES;
clear_contents.metadata.slab = false;
clear_contents.metadata.is_head = false;
clear_contents.metadata.state = (extent_state_t)0;
if (prepare->lead_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree,
prepare->lead_elm_b, clear_contents);
}
rtree_leaf_elm_t *merged_b;
if (prepare->trail_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree,
prepare->trail_elm_a, clear_contents);
merged_b = prepare->trail_elm_b;
} else {
merged_b = prepare->trail_elm_a;
}
emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, merged_b,
lead, SC_NSIZES, false);
}
void
emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata));
assert(contents.edata == edata);
assert(contents.metadata.is_head == edata_is_head_get(edata));
assert(contents.metadata.state == edata_state_get(edata));
}
void
emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
emap_full_alloc_ctx_t context1 = {0};
emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_base_get(edata),
&context1);
assert(context1.edata == NULL);
emap_full_alloc_ctx_t context2 = {0};
emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_last_get(edata),
&context2);
assert(context2.edata == NULL);
}

282
BeefRT/JEMalloc/src/eset.c Normal file

@ -0,0 +1,282 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/eset.h"
#define ESET_NPSIZES (SC_NPSIZES + 1)
static void
eset_bin_init(eset_bin_t *bin) {
edata_heap_new(&bin->heap);
/*
* heap_min doesn't need initialization; it gets filled in when the bin
* goes from non-empty to empty.
*/
}
static void
eset_bin_stats_init(eset_bin_stats_t *bin_stats) {
atomic_store_zu(&bin_stats->nextents, 0, ATOMIC_RELAXED);
atomic_store_zu(&bin_stats->nbytes, 0, ATOMIC_RELAXED);
}
void
eset_init(eset_t *eset, extent_state_t state) {
for (unsigned i = 0; i < ESET_NPSIZES; i++) {
eset_bin_init(&eset->bins[i]);
eset_bin_stats_init(&eset->bin_stats[i]);
}
fb_init(eset->bitmap, ESET_NPSIZES);
edata_list_inactive_init(&eset->lru);
eset->state = state;
}
size_t
eset_npages_get(eset_t *eset) {
return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
}
size_t
eset_nextents_get(eset_t *eset, pszind_t pind) {
return atomic_load_zu(&eset->bin_stats[pind].nextents, ATOMIC_RELAXED);
}
size_t
eset_nbytes_get(eset_t *eset, pszind_t pind) {
return atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
}
static void
eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nextents, cur + 1,
ATOMIC_RELAXED);
cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nbytes, cur + sz,
ATOMIC_RELAXED);
}
static void
eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nextents, cur - 1,
ATOMIC_RELAXED);
cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nbytes, cur - sz,
ATOMIC_RELAXED);
}
void
eset_insert(eset_t *eset, edata_t *edata) {
assert(edata_state_get(edata) == eset->state);
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
if (edata_heap_empty(&eset->bins[pind].heap)) {
fb_set(eset->bitmap, ESET_NPSIZES, (size_t)pind);
/* Only element is automatically the min element. */
eset->bins[pind].heap_min = edata_cmp_summary;
} else {
/*
* There's already a min element; update the summary if we're
* about to insert a lower one.
*/
if (edata_cmp_summary_comp(edata_cmp_summary,
eset->bins[pind].heap_min) < 0) {
eset->bins[pind].heap_min = edata_cmp_summary;
}
}
edata_heap_insert(&eset->bins[pind].heap, edata);
if (config_stats) {
eset_stats_add(eset, pind, size);
}
edata_list_inactive_append(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
* don't need an atomic fetch-add; we can get by with a load followed by
* a store.
*/
size_t cur_eset_npages =
atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
atomic_store_zu(&eset->npages, cur_eset_npages + npages,
ATOMIC_RELAXED);
}
void
eset_remove(eset_t *eset, edata_t *edata) {
assert(edata_state_get(edata) == eset->state ||
edata_state_in_transition(edata_state_get(edata)));
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
if (config_stats) {
eset_stats_sub(eset, pind, size);
}
edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
edata_heap_remove(&eset->bins[pind].heap, edata);
if (edata_heap_empty(&eset->bins[pind].heap)) {
fb_unset(eset->bitmap, ESET_NPSIZES, (size_t)pind);
} else {
/*
* This is a little weird; we compare if the summaries are
* equal, rather than if the edata we removed was the heap
* minimum. The reason why is that getting the heap minimum
* can cause a pairing heap merge operation. We can avoid this
* if we only update the min if it's changed, in which case the
* summaries of the removed element and the min element should
* compare equal.
*/
if (edata_cmp_summary_comp(edata_cmp_summary,
eset->bins[pind].heap_min) == 0) {
eset->bins[pind].heap_min = edata_cmp_summary_get(
edata_heap_first(&eset->bins[pind].heap));
}
}
edata_list_inactive_remove(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* As in eset_insert, we hold eset->mtx and so don't need atomic
* operations for updating eset->npages.
*/
size_t cur_extents_npages =
atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
assert(cur_extents_npages >= npages);
atomic_store_zu(&eset->npages,
cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
/*
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static edata_t *
eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));
for (pszind_t i =
(pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
i < pind_max;
i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
assert(i < SC_NPSIZES);
assert(!edata_heap_empty(&eset->bins[i].heap));
edata_t *edata = edata_heap_first(&eset->bins[i].heap);
uintptr_t base = (uintptr_t)edata_base_get(edata);
size_t candidate_size = edata_size_get(edata);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow or not crossing the next alignment. */
continue;
}
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return edata;
}
}
return NULL;
}
/*
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough.
*
* lg_max_fit is the (log of the) maximum ratio between the requested size and
* the returned size that we'll allow. This can reduce fragmentation by
* avoiding reusing and splitting large extents for smaller sizes. In practice,
* it's set to opt_lg_extent_max_active_fit for the dirty eset and SC_PTR_BITS
* for others.
*/
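/*
 * Illustrative example (editor's note, not in the original file): assuming the
 * default opt_lg_extent_max_active_fit of 6, a one-page (4 KiB) request against
 * the dirty eset stops considering candidates once their size class exceeds
 * 4 KiB << 6 == 256 KiB, instead of splitting an arbitrarily large extent.
 */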
static edata_t *
eset_first_fit(eset_t *eset, size_t size, bool exact_only,
unsigned lg_max_fit) {
edata_t *ret = NULL;
edata_cmp_summary_t ret_summ JEMALLOC_CC_SILENCE_INIT({0});
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
if (exact_only) {
return edata_heap_empty(&eset->bins[pind].heap) ? NULL :
edata_heap_first(&eset->bins[pind].heap);
}
for (pszind_t i =
(pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
i < ESET_NPSIZES;
i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
assert(!edata_heap_empty(&eset->bins[i].heap));
if (lg_max_fit == SC_PTR_BITS) {
/*
* We'll shift by this below, and shifting out all the
* bits is undefined. Decreasing is safe, since the
* page size is larger than 1 byte.
*/
lg_max_fit = SC_PTR_BITS - 1;
}
if ((sz_pind2sz(i) >> lg_max_fit) > size) {
break;
}
if (ret == NULL || edata_cmp_summary_comp(
eset->bins[i].heap_min, ret_summ) < 0) {
/*
* We grab the edata as early as possible, even though
* we might change it later. Practically, a large
* portion of eset_fit calls succeed at the first valid
* index, so this doesn't cost much, and we get the
* effect of prefetching the edata as early as possible.
*/
edata_t *edata = edata_heap_first(&eset->bins[i].heap);
assert(edata_size_get(edata) >= size);
assert(ret == NULL || edata_snad_comp(edata, ret) < 0);
assert(ret == NULL || edata_cmp_summary_comp(
eset->bins[i].heap_min,
edata_cmp_summary_get(edata)) == 0);
ret = edata;
ret_summ = eset->bins[i].heap_min;
}
if (i == SC_NPSIZES) {
break;
}
assert(i < SC_NPSIZES);
}
return ret;
}
edata_t *
eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
unsigned lg_max_fit) {
size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (max_size < esize) {
return NULL;
}
edata_t *edata = eset_first_fit(eset, max_size, exact_only, lg_max_fit);
if (alignment > PAGE && edata == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
edata = eset_fit_alignment(eset, esize, max_size, alignment);
}
return edata;
}

8
BeefRT/JEMalloc/src/exp_grow.c Normal file

@ -0,0 +1,8 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
void
exp_grow_init(exp_grow_t *exp_grow) {
exp_grow->next = sz_psz2ind(HUGEPAGE);
exp_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
}

1326
BeefRT/JEMalloc/src/extent.c Normal file

File diff suppressed because it is too large

277
BeefRT/JEMalloc/src/extent_dss.c Normal file

@ -0,0 +1,277 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/spin.h"
/******************************************************************************/
/* Data. */
const char *opt_dss = DSS_DEFAULT;
const char *dss_prec_names[] = {
"disabled",
"primary",
"secondary",
"N/A"
};
/*
* Current dss precedence default, used when creating new arenas. NB: This is
* stored as unsigned rather than dss_prec_t because in principle there's no
* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
* atomic operations to synchronize the setting.
*/
static atomic_u_t dss_prec_default = ATOMIC_INIT(
(unsigned)DSS_PREC_DEFAULT);
/* Base address of the DSS. */
static void *dss_base;
/* Atomic boolean indicating whether a thread is currently extending DSS. */
static atomic_b_t dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static atomic_p_t dss_max;
/******************************************************************************/
static void *
extent_dss_sbrk(intptr_t increment) {
#ifdef JEMALLOC_DSS
return sbrk(increment);
#else
not_implemented();
return NULL;
#endif
}
dss_prec_t
extent_dss_prec_get(void) {
dss_prec_t ret;
if (!have_dss) {
return dss_prec_disabled;
}
ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
return ret;
}
bool
extent_dss_prec_set(dss_prec_t dss_prec) {
if (!have_dss) {
return (dss_prec != dss_prec_disabled);
}
atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
return false;
}
static void
extent_dss_extending_start(void) {
spin_t spinner = SPIN_INITIALIZER;
while (true) {
bool expected = false;
if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
break;
}
spin_adaptive(&spinner);
}
}
static void
extent_dss_extending_finish(void) {
assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
}
static void *
extent_dss_max_update(void *new_addr) {
/*
* Get the current end of the DSS as max_cur and assure that dss_max is
* up to date.
*/
void *max_cur = extent_dss_sbrk(0);
if (max_cur == (void *)-1) {
return NULL;
}
atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
if (new_addr != NULL && max_cur != new_addr) {
return NULL;
}
return max_cur;
}
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
edata_t *gap;
cassert(have_dss);
assert(size > 0);
assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
/*
* sbrk() uses a signed increment argument, so take care not to
* interpret a large allocation request as a negative increment.
*/
if ((intptr_t)size < 0) {
return NULL;
}
gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
if (gap == NULL) {
return NULL;
}
extent_dss_extending_start();
if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
*/
while (true) {
void *max_cur = extent_dss_max_update(new_addr);
if (max_cur == NULL) {
goto label_oom;
}
bool head_state = opt_retain ? EXTENT_IS_HEAD :
EXTENT_NOT_HEAD;
/*
* Compute how much page-aligned gap space (if any) is
* necessary to satisfy alignment. This space can be
* recycled for later use.
*/
void *gap_addr_page = (void *)(PAGE_CEILING(
(uintptr_t)max_cur));
void *ret = (void *)ALIGNMENT_CEILING(
(uintptr_t)gap_addr_page, alignment);
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
edata_init(gap, arena_ind_get(arena),
gap_addr_page, gap_size_page, false,
SC_NSIZES, extent_sn_next(
&arena->pa_shard.pac),
extent_state_active, false, true,
EXTENT_PAI_PAC, head_state);
}
/*
* Compute the address just past the end of the desired
* allocation space.
*/
void *dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)max_cur ||
(uintptr_t)dss_next < (uintptr_t)max_cur) {
goto label_oom; /* Wrap-around. */
}
/* Compute the increment, including subpage bytes. */
void *gap_addr_subpage = max_cur;
size_t gap_size_subpage = (uintptr_t)ret -
(uintptr_t)gap_addr_subpage;
intptr_t incr = gap_size_subpage + size;
assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
size);
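/*
 * Worked example (illustrative only, assuming 4 KiB pages): with
 * max_cur == 0x10001200, alignment == 0x4000 and size == 0x8000,
 * gap_addr_page == 0x10002000, ret == 0x10004000, gap_size_page ==
 * 0x2000 (recycled below as a usable extent), gap_size_subpage ==
 * 0x2e00 and incr == 0xae00, so sbrk() is asked for exactly the bytes
 * between max_cur and ret + size.
 */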
/* Try to allocate. */
void *dss_prev = extent_dss_sbrk(incr);
if (dss_prev == max_cur) {
/* Success. */
atomic_store_p(&dss_max, dss_next,
ATOMIC_RELEASE);
extent_dss_extending_finish();
if (gap_size_page != 0) {
ehooks_t *ehooks = arena_get_ehooks(
arena);
extent_dalloc_gap(tsdn,
&arena->pa_shard.pac, ehooks, gap);
} else {
edata_cache_put(tsdn,
&arena->pa_shard.edata_cache, gap);
}
if (!*commit) {
*commit = pages_decommit(ret, size);
}
if (*zero && *commit) {
edata_t edata = {0};
ehooks_t *ehooks = arena_get_ehooks(
arena);
edata_init(&edata,
arena_ind_get(arena), ret, size,
size, false, SC_NSIZES,
extent_state_active, false, true,
EXTENT_PAI_PAC, head_state);
if (extent_purge_forced_wrapper(tsdn,
ehooks, &edata, 0, size)) {
memset(ret, 0, size);
}
}
return ret;
}
/*
* Failure, whether due to OOM or a race with a raw
* sbrk() call from outside the allocator.
*/
if (dss_prev == (void *)-1) {
/* OOM. */
atomic_store_b(&dss_exhausted, true,
ATOMIC_RELEASE);
goto label_oom;
}
}
}
label_oom:
extent_dss_extending_finish();
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);
return NULL;
}
static bool
extent_in_dss_helper(void *addr, void *max) {
return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
(uintptr_t)max);
}
bool
extent_in_dss(void *addr) {
cassert(have_dss);
return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
ATOMIC_ACQUIRE));
}
bool
extent_dss_mergeable(void *addr_a, void *addr_b) {
void *max;
cassert(have_dss);
if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
(uintptr_t)dss_base) {
return true;
}
max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
return (extent_in_dss_helper(addr_a, max) ==
extent_in_dss_helper(addr_b, max));
}
void
extent_dss_boot(void) {
cassert(have_dss);
dss_base = extent_dss_sbrk(0);
atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}
/******************************************************************************/


@ -0,0 +1,41 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
/******************************************************************************/
/* Data. */
bool opt_retain =
#ifdef JEMALLOC_RETAIN
true
#else
false
#endif
;
/******************************************************************************/
void *
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit) {
assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
void *ret = pages_map(new_addr, size, alignment, commit);
if (ret == NULL) {
return NULL;
}
assert(ret != NULL);
if (*commit) {
*zero = true;
}
return ret;
}
bool
extent_dalloc_mmap(void *addr, size_t size) {
if (!opt_retain) {
pages_unmap(addr, size);
}
return opt_retain;
}

124
BeefRT/JEMalloc/src/fxp.c Normal file

@ -0,0 +1,124 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/fxp.h"
static bool
fxp_isdigit(char c) {
return '0' <= c && c <= '9';
}
bool
fxp_parse(fxp_t *result, const char *str, char **end) {
/*
* Using malloc_strtoumax in this method isn't as handy as you might
* expect (I tried). In the fractional part, significant leading zeros
* mean that you still need to do your own parsing, now with trickier
* math. In the integer part, the casting (uintmax_t to uint32_t)
* forces more reasoning about bounds than just checking for overflow as
* we parse.
*/
uint32_t integer_part = 0;
const char *cur = str;
/* The string must start with a digit or a decimal point. */
if (*cur != '.' && !fxp_isdigit(*cur)) {
return true;
}
while ('0' <= *cur && *cur <= '9') {
integer_part *= 10;
integer_part += *cur - '0';
if (integer_part >= (1U << 16)) {
return true;
}
cur++;
}
/*
* We've parsed all digits at the beginning of the string, without
* overflow. Either we're done, or there's a fractional part.
*/
if (*cur != '.') {
*result = (integer_part << 16);
if (end != NULL) {
*end = (char *)cur;
}
return false;
}
/* There's a fractional part. */
cur++;
if (!fxp_isdigit(*cur)) {
/* Shouldn't end on the decimal point. */
return true;
}
/*
* We use a lot of precision for the fractional part, even though we'll
* discard most of it; this lets us get exact values for the important
* special case where the denominator is a small power of 2 (for
* instance, 1/512 == 0.001953125 is exactly representable even with
* only 16 bits of fractional precision). We need to left-shift by 16
* before dividing so we pick the number of digits to be
* floor(log(2**48)) = 14.
*/
uint64_t fractional_part = 0;
uint64_t frac_div = 1;
for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
fractional_part *= 10;
frac_div *= 10;
if (fxp_isdigit(*cur)) {
fractional_part += *cur - '0';
cur++;
}
}
/*
* We only parse the first maxdigits characters; we still skip over any
* remaining digits so that *end ends up pointing past the entire number.
*/
while (fxp_isdigit(*cur)) {
cur++;
}
assert(fractional_part < frac_div);
uint32_t fractional_repr = (uint32_t)(
(fractional_part << 16) / frac_div);
/* Success! */
*result = (integer_part << 16) + fractional_repr;
if (end != NULL) {
*end = (char *)cur;
}
return false;
}
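/*
 * Usage sketch (illustrative, not part of the upstream sources): parsing
 * "1.25" yields the 16.16 fixed-point value (1 << 16) + (1 << 14) == 0x14000.
 *
 *   fxp_t val;
 *   char *end;
 *   if (!fxp_parse(&val, "1.25", &end)) {
 *       assert(val == 0x14000U);
 *   }
 */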
void
fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]) {
uint32_t integer_part = fxp_round_down(a);
uint32_t fractional_part = (a & ((1U << 16) - 1));
int leading_fraction_zeros = 0;
uint64_t fraction_digits = fractional_part;
for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
if (fraction_digits < (1U << 16)
&& fraction_digits * 10 >= (1U << 16)) {
leading_fraction_zeros = i;
}
fraction_digits *= 10;
}
fraction_digits >>= 16;
while (fraction_digits > 0 && fraction_digits % 10 == 0) {
fraction_digits /= 10;
}
size_t printed = malloc_snprintf(buf, FXP_BUF_SIZE, "%"FMTu32".",
integer_part);
for (int i = 0; i < leading_fraction_zeros; i++) {
buf[printed] = '0';
printed++;
}
malloc_snprintf(&buf[printed], FXP_BUF_SIZE - printed, "%"FMTu64,
fraction_digits);
}
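/*
 * Illustrative example, continuing the sketch above: fxp_print(0x14000, buf)
 * writes "1.25": integer part 1, then the fractional bits
 * 0x4000/0x10000 == 0.25 with trailing zeros stripped.
 */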

195
BeefRT/JEMalloc/src/hook.c Normal file

@ -0,0 +1,195 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/seq.h"
typedef struct hooks_internal_s hooks_internal_t;
struct hooks_internal_s {
hooks_t hooks;
bool in_use;
};
seq_define(hooks_internal_t, hooks)
static atomic_u_t nhooks = ATOMIC_INIT(0);
static seq_hooks_t hooks[HOOK_MAX];
static malloc_mutex_t hooks_mu;
bool
hook_boot() {
return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
malloc_mutex_rank_exclusive);
}
static void *
hook_install_locked(hooks_t *to_install) {
hooks_internal_t hooks_internal;
for (int i = 0; i < HOOK_MAX; i++) {
bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
/* We hold mu; no concurrent access. */
assert(success);
if (!hooks_internal.in_use) {
hooks_internal.hooks = *to_install;
hooks_internal.in_use = true;
seq_store_hooks(&hooks[i], &hooks_internal);
atomic_store_u(&nhooks,
atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
ATOMIC_RELAXED);
return &hooks[i];
}
}
return NULL;
}
void *
hook_install(tsdn_t *tsdn, hooks_t *to_install) {
malloc_mutex_lock(tsdn, &hooks_mu);
void *ret = hook_install_locked(to_install);
if (ret != NULL) {
tsd_global_slow_inc(tsdn);
}
malloc_mutex_unlock(tsdn, &hooks_mu);
return ret;
}
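/*
 * Minimal usage sketch (illustrative; my_counter and the surrounding caller
 * are hypothetical): install a hooks_t whose alloc callback counts
 * allocations, keep the opaque handle, and hand it back to hook_remove().
 *
 *   static void
 *   count_alloc(void *extra, hook_alloc_t type, void *result,
 *       uintptr_t result_raw, uintptr_t args_raw[3]) {
 *       (*(size_t *)extra)++;
 *   }
 *
 *   hooks_t h = {0};
 *   h.alloc_hook = count_alloc;
 *   h.extra = &my_counter;
 *   void *handle = hook_install(tsdn, &h);
 *   ...
 *   hook_remove(tsdn, handle);
 */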
static void
hook_remove_locked(seq_hooks_t *to_remove) {
hooks_internal_t hooks_internal;
bool success = seq_try_load_hooks(&hooks_internal, to_remove);
/* We hold mu; no concurrent access. */
assert(success);
/* Should only remove hooks that were added. */
assert(hooks_internal.in_use);
hooks_internal.in_use = false;
seq_store_hooks(to_remove, &hooks_internal);
atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
ATOMIC_RELAXED);
}
void
hook_remove(tsdn_t *tsdn, void *opaque) {
if (config_debug) {
char *hooks_begin = (char *)&hooks[0];
char *hooks_end = (char *)&hooks[HOOK_MAX];
char *hook = (char *)opaque;
assert(hooks_begin <= hook && hook < hooks_end
&& (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
}
malloc_mutex_lock(tsdn, &hooks_mu);
hook_remove_locked((seq_hooks_t *)opaque);
tsd_global_slow_dec(tsdn);
malloc_mutex_unlock(tsdn, &hooks_mu);
}
#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \
for (int for_each_hook_counter = 0; \
for_each_hook_counter < HOOK_MAX; \
for_each_hook_counter++) { \
bool for_each_hook_success = seq_try_load_hooks( \
(hooks_internal_ptr), &hooks[for_each_hook_counter]); \
if (!for_each_hook_success) { \
continue; \
} \
if (!(hooks_internal_ptr)->in_use) { \
continue; \
}
#define FOR_EACH_HOOK_END \
}
static bool *
hook_reentrantp() {
/*
* We prevent user reentrancy within hooks. This is basically just a
* thread-local bool that triggers an early-exit.
*
* We don't fold in_hook into reentrancy. There are two reasons for
* this:
* - Right now, we turn on reentrancy during things like extent hook
* execution. Allocating during extent hooks is not officially
* supported, but we don't want to break it for the time being. These
* sorts of allocations should probably still be hooked, though.
* - If a hook allocates, we may want it to be relatively fast (after
* all, it executes on every allocator operation). Turning on
* reentrancy is a fairly heavyweight mode (disabling tcache,
* redirecting to arena 0, etc.). It's possible we may one day want
* to turn on reentrant mode here, if it proves too difficult to keep
* this working. But that's fairly easy for us to see; OTOH, people
* not using hooks because they're too slow is easy for us to miss.
*
* The tricky part is
* that this code might get invoked even if we don't have access to tsd.
* This function mimics getting a pointer to thread-local data, except
* that it might secretly return a pointer to some global data if we
* know that the caller will take the early-exit path.
* If we return a bool that indicates that we are reentrant, then the
* caller will go down the early exit path, leaving the global
* untouched.
*/
static bool in_hook_global = true;
tsdn_t *tsdn = tsdn_fetch();
bool *in_hook = tsdn_in_hookp_get(tsdn);
if (in_hook != NULL) {
return in_hook;
}
return &in_hook_global;
}
#define HOOK_PROLOGUE \
if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
return; \
} \
bool *in_hook = hook_reentrantp(); \
if (*in_hook) { \
return; \
} \
*in_hook = true;
#define HOOK_EPILOGUE \
*in_hook = false;
void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
uintptr_t args_raw[3]) {
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_alloc h = hook.hooks.alloc_hook;
if (h != NULL) {
h(hook.hooks.extra, type, result, result_raw, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}
void
hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_dalloc h = hook.hooks.dalloc_hook;
if (h != NULL) {
h(hook.hooks.extra, type, address, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}
void
hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_expand h = hook.hooks.expand_hook;
if (h != NULL) {
h(hook.hooks.extra, type, address, old_usize, new_usize,
result_raw, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}

1044
BeefRT/JEMalloc/src/hpa.c Normal file

File diff suppressed because it is too large


@ -0,0 +1,63 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/hpa_hooks.h"
static void *hpa_hooks_map(size_t size);
static void hpa_hooks_unmap(void *ptr, size_t size);
static void hpa_hooks_purge(void *ptr, size_t size);
static void hpa_hooks_hugify(void *ptr, size_t size);
static void hpa_hooks_dehugify(void *ptr, size_t size);
static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading);
static uint64_t hpa_hooks_ms_since(nstime_t *past_nstime);
hpa_hooks_t hpa_hooks_default = {
&hpa_hooks_map,
&hpa_hooks_unmap,
&hpa_hooks_purge,
&hpa_hooks_hugify,
&hpa_hooks_dehugify,
&hpa_hooks_curtime,
&hpa_hooks_ms_since
};
static void *
hpa_hooks_map(size_t size) {
bool commit = true;
return pages_map(NULL, size, HUGEPAGE, &commit);
}
static void
hpa_hooks_unmap(void *ptr, size_t size) {
pages_unmap(ptr, size);
}
static void
hpa_hooks_purge(void *ptr, size_t size) {
pages_purge_forced(ptr, size);
}
static void
hpa_hooks_hugify(void *ptr, size_t size) {
bool err = pages_huge(ptr, size);
(void)err;
}
static void
hpa_hooks_dehugify(void *ptr, size_t size) {
bool err = pages_nohuge(ptr, size);
(void)err;
}
static void
hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading) {
if (first_reading) {
nstime_init_zero(r_nstime);
}
nstime_update(r_nstime);
}
static uint64_t
hpa_hooks_ms_since(nstime_t *past_nstime) {
return nstime_ns_since(past_nstime) / 1000 / 1000;
}


@ -0,0 +1,325 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/hpdata.h"
static int
hpdata_age_comp(const hpdata_t *a, const hpdata_t *b) {
uint64_t a_age = hpdata_age_get(a);
uint64_t b_age = hpdata_age_get(b);
/*
* hpdata ages are operation counts in the psset; no two should be the
* same.
*/
assert(a_age != b_age);
return (a_age > b_age) - (a_age < b_age);
}
ph_gen(, hpdata_age_heap, hpdata_t, age_link, hpdata_age_comp)
void
hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) {
hpdata_addr_set(hpdata, addr);
hpdata_age_set(hpdata, age);
hpdata->h_huge = false;
hpdata->h_alloc_allowed = true;
hpdata->h_in_psset_alloc_container = false;
hpdata->h_purge_allowed = false;
hpdata->h_hugify_allowed = false;
hpdata->h_in_psset_hugify_container = false;
hpdata->h_mid_purge = false;
hpdata->h_mid_hugify = false;
hpdata->h_updating = false;
hpdata->h_in_psset = false;
hpdata_longest_free_range_set(hpdata, HUGEPAGE_PAGES);
hpdata->h_nactive = 0;
fb_init(hpdata->active_pages, HUGEPAGE_PAGES);
hpdata->h_ntouched = 0;
fb_init(hpdata->touched_pages, HUGEPAGE_PAGES);
hpdata_assert_consistent(hpdata);
}
void *
hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) {
hpdata_assert_consistent(hpdata);
/*
* This is a metadata change; the hpdata should therefore either not be
* in the psset, or should have explicitly marked itself as being
* mid-update.
*/
assert(!hpdata->h_in_psset || hpdata->h_updating);
assert(hpdata->h_alloc_allowed);
assert((sz & PAGE_MASK) == 0);
size_t npages = sz >> LG_PAGE;
assert(npages <= hpdata_longest_free_range_get(hpdata));
size_t result;
size_t start = 0;
/*
* These are dead stores, but the compiler will issue warnings on them
* since it can't tell statically that found is always true below.
*/
size_t begin = 0;
size_t len = 0;
size_t largest_unchosen_range = 0;
while (true) {
bool found = fb_urange_iter(hpdata->active_pages,
HUGEPAGE_PAGES, start, &begin, &len);
/*
* A precondition to this function is that hpdata must be able
* to serve the allocation.
*/
assert(found);
assert(len <= hpdata_longest_free_range_get(hpdata));
if (len >= npages) {
/*
* We use first-fit within the page slabs; this gives
* bounded worst-case fragmentation within a slab. It's
* not necessarily right; we could experiment with
* various other options.
*/
break;
}
if (len > largest_unchosen_range) {
largest_unchosen_range = len;
}
start = begin + len;
}
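/*
 * Example (illustrative): with free runs [2, 4) and [8, 16) in
 * active_pages, a 3-page request skips the 2-page run (recording it as
 * largest_unchosen_range) and is placed first-fit at page 8.
 */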
/* We found a range; remember it. */
result = begin;
fb_set_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
hpdata->h_nactive += npages;
/*
* We might be about to dirty some memory for the first time; update our
* count if so.
*/
size_t new_dirty = fb_ucount(hpdata->touched_pages, HUGEPAGE_PAGES,
result, npages);
fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, result, npages);
hpdata->h_ntouched += new_dirty;
/*
* If we allocated out of a range that was the longest in the hpdata, it
* might be the only one of that size and we'll have to adjust the
* metadata.
*/
if (len == hpdata_longest_free_range_get(hpdata)) {
start = begin + npages;
while (start < HUGEPAGE_PAGES) {
bool found = fb_urange_iter(hpdata->active_pages,
HUGEPAGE_PAGES, start, &begin, &len);
if (!found) {
break;
}
assert(len <= hpdata_longest_free_range_get(hpdata));
if (len == hpdata_longest_free_range_get(hpdata)) {
largest_unchosen_range = len;
break;
}
if (len > largest_unchosen_range) {
largest_unchosen_range = len;
}
start = begin + len;
}
hpdata_longest_free_range_set(hpdata, largest_unchosen_range);
}
hpdata_assert_consistent(hpdata);
return (void *)(
(uintptr_t)hpdata_addr_get(hpdata) + (result << LG_PAGE));
}
void
hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) {
hpdata_assert_consistent(hpdata);
/* See the comment in reserve. */
assert(!hpdata->h_in_psset || hpdata->h_updating);
assert(((uintptr_t)addr & PAGE_MASK) == 0);
assert((sz & PAGE_MASK) == 0);
size_t begin = ((uintptr_t)addr - (uintptr_t)hpdata_addr_get(hpdata))
>> LG_PAGE;
assert(begin < HUGEPAGE_PAGES);
size_t npages = sz >> LG_PAGE;
size_t old_longest_range = hpdata_longest_free_range_get(hpdata);
fb_unset_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
/* We might have just created a new, larger range. */
size_t new_begin = (fb_fls(hpdata->active_pages, HUGEPAGE_PAGES,
begin) + 1);
size_t new_end = fb_ffs(hpdata->active_pages, HUGEPAGE_PAGES,
begin + npages - 1);
size_t new_range_len = new_end - new_begin;
if (new_range_len > old_longest_range) {
hpdata_longest_free_range_set(hpdata, new_range_len);
}
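/*
 * Example (illustrative): freeing pages [4, 6) while [2, 4) and [6, 9)
 * are already free coalesces into a single [2, 9) run of length 7,
 * which becomes the new longest free range if it beats the old one.
 */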
hpdata->h_nactive -= npages;
hpdata_assert_consistent(hpdata);
}
size_t
hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
hpdata_assert_consistent(hpdata);
/*
* See the comment below; we might purge any inactive extent, so it's
* unsafe for any other thread to turn any inactive extent active while
* we're operating on it.
*/
assert(!hpdata_alloc_allowed_get(hpdata));
purge_state->npurged = 0;
purge_state->next_purge_search_begin = 0;
/*
* Initialize to_purge.
*
* It's possible to end up in situations where two dirty extents are
* separated by a retained extent:
* - 1 page allocated.
* - 1 page allocated.
* - 1 page allocated.
*
* If the middle page is freed and purged, and then the first and third
* pages are freed, and then another purge pass happens, the hpdata
* looks like this:
* - 1 page dirty.
* - 1 page retained.
* - 1 page dirty.
*
* But it's safe to do a single 3-page purge.
*
* We do this by first computing the dirty pages, and then filling in
* any gaps by extending each range in the dirty bitmap to extend until
* the next active page. This purges more pages, but the expensive part
* of purging is the TLB shootdowns, rather than the kernel state
* tracking; doing a little bit more of the latter is fine if it saves
* us from doing some of the former.
*/
/*
* The dirty pages are those that are touched but not active. Note that
* in a normal-ish case, HUGEPAGE_PAGES is something like 512 and the
* fb_group_t is 64 bits, so this is 64 bytes, spread across 8
* fb_group_ts.
*/
fb_group_t dirty_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
fb_init(dirty_pages, HUGEPAGE_PAGES);
fb_bit_not(dirty_pages, hpdata->active_pages, HUGEPAGE_PAGES);
fb_bit_and(dirty_pages, dirty_pages, hpdata->touched_pages,
HUGEPAGE_PAGES);
fb_init(purge_state->to_purge, HUGEPAGE_PAGES);
size_t next_bit = 0;
while (next_bit < HUGEPAGE_PAGES) {
size_t next_dirty = fb_ffs(dirty_pages, HUGEPAGE_PAGES,
next_bit);
/* Recall that fb_ffs returns nbits if no set bit is found. */
if (next_dirty == HUGEPAGE_PAGES) {
break;
}
size_t next_active = fb_ffs(hpdata->active_pages,
HUGEPAGE_PAGES, next_dirty);
/*
* Don't purge past the end of the dirty extent, into retained
* pages. This helps the kernel a tiny bit, but honestly it's
* mostly helpful for testing (where we tend to write test cases
* that think in terms of the dirty ranges).
*/
ssize_t last_dirty = fb_fls(dirty_pages, HUGEPAGE_PAGES,
next_active - 1);
assert(last_dirty >= 0);
assert((size_t)last_dirty >= next_dirty);
assert((size_t)last_dirty - next_dirty + 1 <= HUGEPAGE_PAGES);
fb_set_range(purge_state->to_purge, HUGEPAGE_PAGES, next_dirty,
last_dirty - next_dirty + 1);
next_bit = next_active + 1;
}
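/*
 * In the dirty/retained/dirty example above, this loop emits one 3-page
 * range into to_purge, so a single purge call covers both dirty pages
 * and the retained page between them.
 */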
/* We should purge, at least, everything dirty. */
size_t ndirty = hpdata->h_ntouched - hpdata->h_nactive;
purge_state->ndirty_to_purge = ndirty;
assert(ndirty <= fb_scount(
purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
assert(ndirty == fb_scount(dirty_pages, HUGEPAGE_PAGES, 0,
HUGEPAGE_PAGES));
hpdata_assert_consistent(hpdata);
return ndirty;
}
bool
hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
void **r_purge_addr, size_t *r_purge_size) {
/*
* Note that we don't have a consistency check here; we're accessing
* hpdata without synchronization, and therefore have no right to expect
* a consistent state.
*/
assert(!hpdata_alloc_allowed_get(hpdata));
if (purge_state->next_purge_search_begin == HUGEPAGE_PAGES) {
return false;
}
size_t purge_begin;
size_t purge_len;
bool found_range = fb_srange_iter(purge_state->to_purge, HUGEPAGE_PAGES,
purge_state->next_purge_search_begin, &purge_begin, &purge_len);
if (!found_range) {
return false;
}
*r_purge_addr = (void *)(
(uintptr_t)hpdata_addr_get(hpdata) + purge_begin * PAGE);
*r_purge_size = purge_len * PAGE;
purge_state->next_purge_search_begin = purge_begin + purge_len;
purge_state->npurged += purge_len;
assert(purge_state->npurged <= HUGEPAGE_PAGES);
return true;
}
void
hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
assert(!hpdata_alloc_allowed_get(hpdata));
hpdata_assert_consistent(hpdata);
/* See the comment in reserve. */
assert(!hpdata->h_in_psset || hpdata->h_updating);
assert(purge_state->npurged == fb_scount(purge_state->to_purge,
HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
assert(purge_state->npurged >= purge_state->ndirty_to_purge);
fb_bit_not(purge_state->to_purge, purge_state->to_purge,
HUGEPAGE_PAGES);
fb_bit_and(hpdata->touched_pages, hpdata->touched_pages,
purge_state->to_purge, HUGEPAGE_PAGES);
assert(hpdata->h_ntouched >= purge_state->ndirty_to_purge);
hpdata->h_ntouched -= purge_state->ndirty_to_purge;
hpdata_assert_consistent(hpdata);
}
void
hpdata_hugify(hpdata_t *hpdata) {
hpdata_assert_consistent(hpdata);
hpdata->h_huge = true;
fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES);
hpdata->h_ntouched = HUGEPAGE_PAGES;
hpdata_assert_consistent(hpdata);
}
void
hpdata_dehugify(hpdata_t *hpdata) {
hpdata_assert_consistent(hpdata);
hpdata->h_huge = false;
hpdata_assert_consistent(hpdata);
}


@ -0,0 +1,77 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
void
inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
size_t *nregs, size_t *size) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
if (unlikely(edata == NULL)) {
*nfree = *nregs = *size = 0;
return;
}
*size = edata_size_get(edata);
if (!edata_slab_get(edata)) {
*nfree = 0;
*nregs = 1;
} else {
*nfree = edata_nfree_get(edata);
*nregs = bin_infos[edata_szind_get(edata)].nregs;
assert(*nfree <= *nregs);
assert(*nfree * edata_usize_get(edata) <= *size);
}
}
void
inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size, size_t *bin_nfree,
size_t *bin_nregs, void **slabcur_addr) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
&& bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
if (unlikely(edata == NULL)) {
*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
*slabcur_addr = NULL;
return;
}
*size = edata_size_get(edata);
if (!edata_slab_get(edata)) {
*nfree = *bin_nfree = *bin_nregs = 0;
*nregs = 1;
*slabcur_addr = NULL;
return;
}
*nfree = edata_nfree_get(edata);
const szind_t szind = edata_szind_get(edata);
*nregs = bin_infos[szind].nregs;
assert(*nfree <= *nregs);
assert(*nfree * edata_usize_get(edata) <= *size);
arena_t *arena = (arena_t *)atomic_load_p(
&arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
assert(arena != NULL);
const unsigned binshard = edata_binshard_get(edata);
bin_t *bin = arena_get_bin(arena, szind, binshard);
malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats) {
*bin_nregs = *nregs * bin->stats.curslabs;
assert(*bin_nregs >= bin->stats.curregs);
*bin_nfree = *bin_nregs - bin->stats.curregs;
} else {
*bin_nfree = *bin_nregs = 0;
}
edata_t *slab;
if (bin->slabcur != NULL) {
slab = bin->slabcur;
} else {
slab = edata_heap_first(&bin->slabs_nonfull);
}
*slabcur_addr = slab != NULL ? edata_addr_get(slab) : NULL;
malloc_mutex_unlock(tsdn, &bin->lock);
}

File diff suppressed because it is too large


@ -0,0 +1,254 @@
#include <mutex>
#include <new>
#define JEMALLOC_CPP_CPP_
#ifdef __cplusplus
extern "C" {
#endif
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#ifdef __cplusplus
}
#endif
// All operators in this file are exported.
// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
// thunk?
//
// extern __typeof (sdallocx) sdallocx_int
// __attribute ((alias ("sdallocx"),
// visibility ("hidden")));
//
// ... but it needs to work with jemalloc namespaces.
void *operator new(std::size_t size);
void *operator new[](std::size_t size);
void *operator new(std::size_t size, const std::nothrow_t &) noexcept;
void *operator new[](std::size_t size, const std::nothrow_t &) noexcept;
void operator delete(void *ptr) noexcept;
void operator delete[](void *ptr) noexcept;
void operator delete(void *ptr, const std::nothrow_t &) noexcept;
void operator delete[](void *ptr, const std::nothrow_t &) noexcept;
#if __cpp_sized_deallocation >= 201309
/* C++14's sized-delete operators. */
void operator delete(void *ptr, std::size_t size) noexcept;
void operator delete[](void *ptr, std::size_t size) noexcept;
#endif
#if __cpp_aligned_new >= 201606
/* C++17's over-aligned operators. */
void *operator new(std::size_t size, std::align_val_t);
void *operator new(std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
void *operator new[](std::size_t size, std::align_val_t);
void *operator new[](std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
void operator delete(void* ptr, std::align_val_t) noexcept;
void operator delete(void* ptr, std::align_val_t, const std::nothrow_t &) noexcept;
void operator delete(void* ptr, std::size_t size, std::align_val_t al) noexcept;
void operator delete[](void* ptr, std::align_val_t) noexcept;
void operator delete[](void* ptr, std::align_val_t, const std::nothrow_t &) noexcept;
void operator delete[](void* ptr, std::size_t size, std::align_val_t al) noexcept;
#endif
JEMALLOC_NOINLINE
static void *
handleOOM(std::size_t size, bool nothrow) {
if (opt_experimental_infallible_new) {
safety_check_fail("<jemalloc>: Allocation failed and "
"opt.experimental_infallible_new is true. Aborting.\n");
return nullptr;
}
void *ptr = nullptr;
while (ptr == nullptr) {
std::new_handler handler;
// GCC-4.8 and clang 4.0 do not have std::get_new_handler.
{
static std::mutex mtx;
std::lock_guard<std::mutex> lock(mtx);
handler = std::set_new_handler(nullptr);
std::set_new_handler(handler);
}
if (handler == nullptr)
break;
try {
handler();
} catch (const std::bad_alloc &) {
break;
}
ptr = je_malloc(size);
}
if (ptr == nullptr && !nothrow)
std::__throw_bad_alloc();
return ptr;
}
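// Illustrative note (not from the upstream sources): the loop above follows
// the usual new_handler contract. If the application has installed, say,
//     std::set_new_handler([] { drop_some_cache(); });
// handleOOM keeps calling the handler and retrying je_malloc() until the
// allocation succeeds, the handler throws std::bad_alloc, or no handler is
// left, at which point the nothrow path returns nullptr.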
template <bool IsNoExcept>
JEMALLOC_NOINLINE
static void *
fallback_impl(std::size_t size) noexcept(IsNoExcept) {
void *ptr = malloc_default(size);
if (likely(ptr != nullptr)) {
return ptr;
}
return handleOOM(size, IsNoExcept);
}
template <bool IsNoExcept>
JEMALLOC_ALWAYS_INLINE
void *
newImpl(std::size_t size) noexcept(IsNoExcept) {
return imalloc_fastpath(size, &fallback_impl<IsNoExcept>);
}
void *
operator new(std::size_t size) {
return newImpl<false>(size);
}
void *
operator new[](std::size_t size) {
return newImpl<false>(size);
}
void *
operator new(std::size_t size, const std::nothrow_t &) noexcept {
return newImpl<true>(size);
}
void *
operator new[](std::size_t size, const std::nothrow_t &) noexcept {
return newImpl<true>(size);
}
#if __cpp_aligned_new >= 201606
template <bool IsNoExcept>
JEMALLOC_ALWAYS_INLINE
void *
alignedNewImpl(std::size_t size, std::align_val_t alignment) noexcept(IsNoExcept) {
void *ptr = je_aligned_alloc(static_cast<std::size_t>(alignment), size);
if (likely(ptr != nullptr)) {
return ptr;
}
return handleOOM(size, IsNoExcept);
}
void *
operator new(std::size_t size, std::align_val_t alignment) {
return alignedNewImpl<false>(size, alignment);
}
void *
operator new[](std::size_t size, std::align_val_t alignment) {
return alignedNewImpl<false>(size, alignment);
}
void *
operator new(std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept {
return alignedNewImpl<true>(size, alignment);
}
void *
operator new[](std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept {
return alignedNewImpl<true>(size, alignment);
}
#endif // __cpp_aligned_new
void
operator delete(void *ptr) noexcept {
je_free(ptr);
}
void
operator delete[](void *ptr) noexcept {
je_free(ptr);
}
void
operator delete(void *ptr, const std::nothrow_t &) noexcept {
je_free(ptr);
}
void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
je_free(ptr);
}
#if __cpp_sized_deallocation >= 201309
JEMALLOC_ALWAYS_INLINE
void
sizedDeleteImpl(void* ptr, std::size_t size) noexcept {
if (unlikely(ptr == nullptr)) {
return;
}
je_sdallocx_noflags(ptr, size);
}
void
operator delete(void *ptr, std::size_t size) noexcept {
sizedDeleteImpl(ptr, size);
}
void
operator delete[](void *ptr, std::size_t size) noexcept {
sizedDeleteImpl(ptr, size);
}
#endif // __cpp_sized_deallocation
#if __cpp_aligned_new >= 201606
JEMALLOC_ALWAYS_INLINE
void
alignedSizedDeleteImpl(void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
if (config_debug) {
assert(((size_t)alignment & ((size_t)alignment - 1)) == 0);
}
if (unlikely(ptr == nullptr)) {
return;
}
je_sdallocx(ptr, size, MALLOCX_ALIGN(alignment));
}
void
operator delete(void* ptr, std::align_val_t) noexcept {
je_free(ptr);
}
void
operator delete[](void* ptr, std::align_val_t) noexcept {
je_free(ptr);
}
void
operator delete(void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
je_free(ptr);
}
void
operator delete[](void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
je_free(ptr);
}
void
operator delete(void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
alignedSizedDeleteImpl(ptr, size, alignment);
}
void
operator delete[](void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
alignedSizedDeleteImpl(ptr, size, alignment);
}
#endif // __cpp_aligned_new

322
BeefRT/JEMalloc/src/large.c Normal file

@ -0,0 +1,322 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prof_recent.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
assert(usize == sz_s2u(usize));
return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}
void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero) {
size_t ausize;
edata_t *edata;
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
assert(!tsdn_null(tsdn) || arena != NULL);
ausize = sz_sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) {
return NULL;
}
if (likely(!tsdn_null(tsdn))) {
arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
}
if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
arena, usize, alignment, zero)) == NULL) {
return NULL;
}
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
/* Insert edata into large. */
malloc_mutex_lock(tsdn, &arena->large_mtx);
edata_list_active_append(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
arena_decay_tick(tsdn, arena);
return edata_addr_get(edata);
}
static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
arena_t *arena = arena_get_from_edata(edata);
ehooks_t *ehooks = arena_get_ehooks(arena);
size_t old_size = edata_size_get(edata);
size_t old_usize = edata_usize_get(edata);
assert(old_usize > usize);
if (ehooks_split_will_fail(ehooks)) {
return true;
}
bool deferred_work_generated = false;
bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size,
usize + sz_large_pad, sz_size2index(usize),
&deferred_work_generated);
if (err) {
return true;
}
if (deferred_work_generated) {
arena_handle_deferred_work(tsdn, arena);
}
arena_extent_ralloc_large_shrink(tsdn, arena, edata, old_usize);
return false;
}
static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
bool zero) {
arena_t *arena = arena_get_from_edata(edata);
size_t old_size = edata_size_get(edata);
size_t old_usize = edata_usize_get(edata);
size_t new_size = usize + sz_large_pad;
szind_t szind = sz_size2index(usize);
bool deferred_work_generated = false;
bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
szind, zero, &deferred_work_generated);
if (deferred_work_generated) {
arena_handle_deferred_work(tsdn, arena);
}
if (err) {
return true;
}
if (zero) {
if (opt_cache_oblivious) {
assert(sz_large_pad == PAGE);
/*
* Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state.
* There will always be trailing bytes, because ptr's
* offset from the beginning of the extent is a multiple
* of CACHELINE in [0 .. PAGE).
*/
void *zbase = (void *)
((uintptr_t)edata_addr_get(edata) + old_usize);
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
PAGE));
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
assert(nzero > 0);
memset(zbase, 0, nzero);
}
}
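/*
 * Worked example (assuming 4 KiB pages): if the allocation's address sits
 * 0x140 bytes into its page (a CACHELINE multiple picked for cache
 * obliviousness), zbase is 0x140 bytes past a page boundary and
 * nzero == 0x1000 - 0x140 == 0xec0 bytes are cleared; the expansion path
 * is expected to hand back zeroed pages beyond that boundary.
 */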
arena_extent_ralloc_large_expand(tsdn, arena, edata, old_usize);
return false;
}
bool
large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero) {
size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
assert(oldusize >= SC_LARGE_MINCLASS
&& usize_max >= SC_LARGE_MINCLASS);
if (usize_max > oldusize) {
/* Attempt to expand the allocation in-place. */
if (!large_ralloc_no_move_expand(tsdn, edata, usize_max,
zero)) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min > oldusize &&
large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
/*
* Avoid moving the allocation if the existing extent size accommodates
* the new size.
*/
if (oldusize >= usize_min && oldusize <= usize_max) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Attempt to shrink the allocation in-place. */
if (oldusize > usize_max) {
if (!large_ralloc_no_move_shrink(tsdn, edata, usize_max)) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
return true;
}
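/*
 * Illustrative note: growing, say, a 1 MiB usize to 2 MiB tries the in-place
 * pa_expand() path above first; only when every in-place option fails does
 * large_ralloc() below fall back to allocate + copy + free.
 */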
static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero) {
if (alignment <= CACHELINE) {
return large_malloc(tsdn, arena, usize, zero);
}
return large_palloc(tsdn, arena, usize, alignment, zero);
}
void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
assert(oldusize >= SC_LARGE_MINCLASS
&& usize >= SC_LARGE_MINCLASS);
/* Try to avoid moving the allocation. */
if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) {
hook_invoke_expand(hook_args->is_realloc
? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
usize, (uintptr_t)ptr, hook_args->args);
return edata_addr_get(edata);
}
/*
* usize and old size are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
zero);
if (ret == NULL) {
return NULL;
}
hook_invoke_alloc(hook_args->is_realloc
? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
hook_args->args);
hook_invoke_dalloc(hook_args->is_realloc
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
size_t copysize = (usize < oldusize) ? usize : oldusize;
memcpy(ret, edata_addr_get(edata), copysize);
isdalloct(tsdn, edata_addr_get(edata), oldusize, tcache, NULL, true);
return ret;
}
/*
* locked indicates whether the arena's large_mtx is currently held.
*/
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
bool locked) {
if (!locked) {
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
malloc_mutex_lock(tsdn, &arena->large_mtx);
edata_list_active_remove(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
} else {
/* Only hold the large_mtx if necessary. */
if (!arena_is_auto(arena)) {
malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
edata_list_active_remove(&arena->large, edata);
}
}
arena_extent_dalloc_large_prep(tsdn, arena, edata);
}
static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
bool deferred_work_generated = false;
pa_dalloc(tsdn, &arena->pa_shard, edata, &deferred_work_generated);
if (deferred_work_generated) {
arena_handle_deferred_work(tsdn, arena);
}
}
void
large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata) {
large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true);
}
void
large_dalloc_finish(tsdn_t *tsdn, edata_t *edata) {
large_dalloc_finish_impl(tsdn, arena_get_from_edata(edata), edata);
}
void
large_dalloc(tsdn_t *tsdn, edata_t *edata) {
arena_t *arena = arena_get_from_edata(edata);
large_dalloc_prep_impl(tsdn, arena, edata, false);
large_dalloc_finish_impl(tsdn, arena, edata);
arena_decay_tick(tsdn, arena);
}
size_t
large_salloc(tsdn_t *tsdn, const edata_t *edata) {
return edata_usize_get(edata);
}
void
large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
bool reset_recent) {
assert(prof_info != NULL);
prof_tctx_t *alloc_tctx = edata_prof_tctx_get(edata);
prof_info->alloc_tctx = alloc_tctx;
if ((uintptr_t)alloc_tctx > (uintptr_t)1U) {
nstime_copy(&prof_info->alloc_time,
edata_prof_alloc_time_get(edata));
prof_info->alloc_size = edata_prof_alloc_size_get(edata);
if (reset_recent) {
/*
* Reset the pointer on the recent allocation record,
* so that this allocation is recorded as released.
*/
prof_recent_alloc_reset(tsd, edata);
}
}
}
static void
large_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
edata_prof_tctx_set(edata, tctx);
}
void
large_prof_tctx_reset(edata_t *edata) {
large_prof_tctx_set(edata, (prof_tctx_t *)(uintptr_t)1U);
}
void
large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size) {
nstime_t t;
nstime_prof_init_update(&t);
edata_prof_alloc_time_set(edata, &t);
edata_prof_alloc_size_set(edata, size);
edata_prof_recent_alloc_init(edata);
large_prof_tctx_set(edata, tctx);
}

78
BeefRT/JEMalloc/src/log.c Normal file

@ -0,0 +1,78 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/log.h"
char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
atomic_b_t log_init_done = ATOMIC_INIT(false);
/*
* Returns a pointer to the first character past the end of the current
* segment, i.e. the next '|' delimiter or the terminating '\0'.
*/
static const char *
log_var_extract_segment(const char* segment_begin) {
const char *end;
for (end = segment_begin; *end != '\0' && *end != '|'; end++) {
}
return end;
}
static bool
log_var_matches_segment(const char *segment_begin, const char *segment_end,
const char *log_var_begin, const char *log_var_end) {
assert(segment_begin <= segment_end);
assert(log_var_begin < log_var_end);
ptrdiff_t segment_len = segment_end - segment_begin;
ptrdiff_t log_var_len = log_var_end - log_var_begin;
/* The special '.' segment matches everything. */
if (segment_len == 1 && *segment_begin == '.') {
return true;
}
if (segment_len == log_var_len) {
return strncmp(segment_begin, log_var_begin, segment_len) == 0;
} else if (segment_len < log_var_len) {
return strncmp(segment_begin, log_var_begin, segment_len) == 0
&& log_var_begin[segment_len] == '.';
} else {
return false;
}
}
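/*
 * Example (illustrative): a configured segment "arena" matches the log
 * variables "arena" and "arena.create" but not "arenas"; the special
 * segment "." matches everything.
 */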
unsigned
log_var_update_state(log_var_t *log_var) {
const char *log_var_begin = log_var->name;
const char *log_var_end = log_var->name + strlen(log_var->name);
/* Pointer to the beginning of the current segment. */
const char *segment_begin = log_var_names;
/*
* If log_init_done is false, we haven't parsed the malloc conf yet. To
* avoid log-spew, we default to not displaying anything.
*/
if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) {
return LOG_INITIALIZED_NOT_ENABLED;
}
while (true) {
const char *segment_end = log_var_extract_segment(
segment_begin);
assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE);
if (log_var_matches_segment(segment_begin, segment_end,
log_var_begin, log_var_end)) {
atomic_store_u(&log_var->state, LOG_ENABLED,
ATOMIC_RELAXED);
return LOG_ENABLED;
}
if (*segment_end == '\0') {
/* Hit the end of the segment string with no match. */
atomic_store_u(&log_var->state,
LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED);
return LOG_INITIALIZED_NOT_ENABLED;
}
/* Otherwise, skip the delimiter and continue. */
segment_begin = segment_end + 1;
}
}


@ -0,0 +1,697 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/util.h"
#ifdef assert
# undef assert
#endif
#ifdef not_reached
# undef not_reached
#endif
#ifdef not_implemented
# undef not_implemented
#endif
#ifdef assert_not_implemented
# undef assert_not_implemented
#endif
/*
* Define simple versions of assertion macros that won't recurse in case
* of assertion failures in malloc_*printf().
*/
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
abort(); \
} \
} while (0)
#define not_reached() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
unreachable(); \
} while (0)
#define not_implemented() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Not implemented\n"); \
abort(); \
} \
} while (0)
#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) { \
not_implemented(); \
} \
} while (0)
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
size_t *slen_p);
#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
size_t *slen_p);
/******************************************************************************/
/* malloc_message() setup. */
void
wrtmessage(void *cbopaque, const char *s) {
malloc_write_fd(STDERR_FILENO, s, strlen(s));
}
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
/*
* Wrapper around malloc_message() that avoids the need for
* je_malloc_message(...) throughout the code.
*/
void
malloc_write(const char *s) {
if (je_malloc_message != NULL) {
je_malloc_message(NULL, s);
} else {
wrtmessage(NULL, s);
}
}
/*
* glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
* provide a wrapper.
*/
int
buferror(int err, char *buf, size_t buflen) {
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, (DWORD)buflen, NULL);
return 0;
#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
return 0;
#else
return strerror_r(err, buf, buflen);
#endif
}
uintmax_t
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
uintmax_t ret, digit;
unsigned b;
bool neg;
const char *p, *ns;
p = nptr;
if (base < 0 || base == 1 || base > 36) {
ns = p;
set_errno(EINVAL);
ret = UINTMAX_MAX;
goto label_return;
}
b = base;
/* Swallow leading whitespace and get sign, if any. */
neg = false;
while (true) {
switch (*p) {
case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
p++;
break;
case '-':
neg = true;
JEMALLOC_FALLTHROUGH;
case '+':
p++;
JEMALLOC_FALLTHROUGH;
default:
goto label_prefix;
}
}
/* Get prefix, if any. */
label_prefix:
/*
* Note where the first non-whitespace/sign character is so that it is
* possible to tell whether any digits are consumed (e.g., " 0" vs.
* " -x").
*/
ns = p;
if (*p == '0') {
switch (p[1]) {
case '0': case '1': case '2': case '3': case '4': case '5':
case '6': case '7':
if (b == 0) {
b = 8;
}
if (b == 8) {
p++;
}
break;
case 'X': case 'x':
switch (p[2]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F':
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
if (b == 0) {
b = 16;
}
if (b == 16) {
p += 2;
}
break;
default:
break;
}
break;
default:
p++;
ret = 0;
goto label_return;
}
}
if (b == 0) {
b = 10;
}
/* Convert. */
ret = 0;
while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
|| (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
|| (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
uintmax_t pret = ret;
ret *= b;
ret += digit;
if (ret < pret) {
/* Overflow. */
set_errno(ERANGE);
ret = UINTMAX_MAX;
goto label_return;
}
p++;
}
if (neg) {
ret = (uintmax_t)(-((intmax_t)ret));
}
if (p == ns) {
/* No conversion performed. */
set_errno(EINVAL);
ret = UINTMAX_MAX;
goto label_return;
}
label_return:
if (endptr != NULL) {
if (p == ns) {
/* No characters were converted. */
*endptr = (char *)nptr;
} else {
*endptr = (char *)p;
}
}
return ret;
}
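/*
 * Example (illustrative): malloc_strtoumax("  0x1f", &end, 0) auto-detects
 * the hex prefix, returns 31, and leaves end pointing just past the 'f';
 * malloc_strtoumax("010", &end, 0) treats the leading 0 as octal and
 * returns 8.
 */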
static char *
u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
unsigned i;
i = U2S_BUFSIZE - 1;
s[i] = '\0';
switch (base) {
case 10:
do {
i--;
s[i] = "0123456789"[x % (uint64_t)10];
x /= (uint64_t)10;
} while (x > 0);
break;
case 16: {
const char *digits = (uppercase)
? "0123456789ABCDEF"
: "0123456789abcdef";
do {
i--;
s[i] = digits[x & 0xf];
x >>= 4;
} while (x > 0);
break;
} default: {
const char *digits = (uppercase)
? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
: "0123456789abcdefghijklmnopqrstuvwxyz";
assert(base >= 2 && base <= 36);
do {
i--;
s[i] = digits[x % (uint64_t)base];
x /= (uint64_t)base;
} while (x > 0);
}}
*slen_p = U2S_BUFSIZE - 1 - i;
return &s[i];
}
static char *
d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
bool neg;
if ((neg = (x < 0))) {
x = -x;
}
s = u2s(x, 10, false, s, slen_p);
if (neg) {
sign = '-';
}
switch (sign) {
case '-':
if (!neg) {
break;
}
JEMALLOC_FALLTHROUGH;
case ' ':
case '+':
s--;
(*slen_p)++;
*s = sign;
break;
default: not_reached();
}
return s;
}
static char *
o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) {
s = u2s(x, 8, false, s, slen_p);
if (alt_form && *s != '0') {
s--;
(*slen_p)++;
*s = '0';
}
return s;
}
static char *
x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
s = u2s(x, 16, uppercase, s, slen_p);
if (alt_form) {
s -= 2;
(*slen_p) += 2;
memcpy(s, uppercase ? "0X" : "0x", 2);
}
return s;
}
JEMALLOC_COLD
size_t
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
size_t i;
const char *f;
#define APPEND_C(c) do { \
if (i < size) { \
str[i] = (c); \
} \
i++; \
} while (0)
#define APPEND_S(s, slen) do { \
if (i < size) { \
size_t cpylen = (slen <= size - i) ? slen : size - i; \
memcpy(&str[i], s, cpylen); \
} \
i += slen; \
} while (0)
#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
/* Left padding. */ \
size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
(size_t)width - slen : 0); \
if (!left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) { \
if (pad_zero) { \
APPEND_C('0'); \
} else { \
APPEND_C(' '); \
} \
} \
} \
/* Value. */ \
APPEND_S(s, slen); \
/* Right padding. */ \
if (left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) { \
APPEND_C(' '); \
} \
} \
} while (0)
#define GET_ARG_NUMERIC(val, len) do { \
switch ((unsigned char)len) { \
case '?': \
val = va_arg(ap, int); \
break; \
case '?' | 0x80: \
val = va_arg(ap, unsigned int); \
break; \
case 'l': \
val = va_arg(ap, long); \
break; \
case 'l' | 0x80: \
val = va_arg(ap, unsigned long); \
break; \
case 'q': \
val = va_arg(ap, long long); \
break; \
case 'q' | 0x80: \
val = va_arg(ap, unsigned long long); \
break; \
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
case 'j' | 0x80: \
val = va_arg(ap, uintmax_t); \
break; \
case 't': \
val = va_arg(ap, ptrdiff_t); \
break; \
case 'z': \
val = va_arg(ap, ssize_t); \
break; \
case 'z' | 0x80: \
val = va_arg(ap, size_t); \
break; \
case 'p': /* Synthetic; used for %p. */ \
val = va_arg(ap, uintptr_t); \
break; \
default: \
not_reached(); \
val = 0; \
} \
} while (0)
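/*
 * Example (illustrative): for "%zu" the length parse below leaves len == 'z'
 * and the 'u' conversion ORs in 0x80, so GET_ARG_NUMERIC pulls a size_t;
 * "%lld" sets len == 'q' and pulls a signed long long.
 */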
i = 0;
f = format;
while (true) {
switch (*f) {
case '\0': goto label_out;
case '%': {
bool alt_form = false;
bool left_justify = false;
bool plus_space = false;
bool plus_plus = false;
int prec = -1;
int width = -1;
unsigned char len = '?';
char *s;
size_t slen;
bool first_width_digit = true;
bool pad_zero = false;
f++;
/* Flags. */
while (true) {
switch (*f) {
case '#':
assert(!alt_form);
alt_form = true;
break;
case '-':
assert(!left_justify);
left_justify = true;
break;
case ' ':
assert(!plus_space);
plus_space = true;
break;
case '+':
assert(!plus_plus);
plus_plus = true;
break;
default: goto label_width;
}
f++;
}
/* Width. */
label_width:
switch (*f) {
case '*':
width = va_arg(ap, int);
f++;
if (width < 0) {
left_justify = true;
width = -width;
}
break;
case '0':
if (first_width_digit) {
pad_zero = true;
}
JEMALLOC_FALLTHROUGH;
case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uwidth;
set_errno(0);
uwidth = malloc_strtoumax(f, (char **)&f, 10);
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
width = (int)uwidth;
first_width_digit = false;
break;
} default:
break;
}
/* Width/precision separator. */
if (*f == '.') {
f++;
} else {
goto label_length;
}
/* Precision. */
switch (*f) {
case '*':
prec = va_arg(ap, int);
f++;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uprec;
set_errno(0);
uprec = malloc_strtoumax(f, (char **)&f, 10);
assert(uprec != UINTMAX_MAX || get_errno() !=
ERANGE);
prec = (int)uprec;
break;
}
default: break;
}
/* Length. */
label_length:
switch (*f) {
case 'l':
f++;
if (*f == 'l') {
len = 'q';
f++;
} else {
len = 'l';
}
break;
case 'q': case 'j': case 't': case 'z':
len = *f;
f++;
break;
default: break;
}
/* Conversion specifier. */
switch (*f) {
case '%':
/* %% */
APPEND_C(*f);
f++;
break;
case 'd': case 'i': {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
/*
* Outputting negative, zero-padded numbers
* would require a nontrivial rework of the
* interaction between the width and padding
* (since 0 padding goes between the '-' and the
* number, while ' ' padding goes either before
* the - or after the number). Since we
* currently don't ever need 0-padded negative
* numbers, just don't bother supporting it.
*/
assert(!pad_zero);
GET_ARG_NUMERIC(val, len);
s = d2s(val, (plus_plus ? '+' : (plus_space ?
' ' : '-')), buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'o': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[O2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = o2s(val, alt_form, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'u': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[U2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = u2s(val, 10, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'x': case 'X': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = x2s(val, alt_form, *f == 'X', buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'c': {
unsigned char val;
char buf[2];
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
val = va_arg(ap, int);
buf[0] = val;
buf[1] = '\0';
APPEND_PADDED_S(buf, 1, width, left_justify);
f++;
break;
} case 's':
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
slen = (prec < 0) ? strlen(s) : (size_t)prec;
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
case 'p': {
uintmax_t val;
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, 'p');
s = x2s(val, true, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} default: not_reached();
}
break;
} default: {
APPEND_C(*f);
f++;
break;
}}
}
label_out:
if (i < size) {
str[i] = '\0';
} else {
str[size - 1] = '\0';
}
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
return i;
}
JEMALLOC_FORMAT_PRINTF(3, 4)
size_t
malloc_snprintf(char *str, size_t size, const char *format, ...) {
size_t ret;
va_list ap;
va_start(ap, format);
ret = malloc_vsnprintf(str, size, format, ap);
va_end(ap);
return ret;
}
void
malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
va_list ap) {
char buf[MALLOC_PRINTF_BUFSIZE];
if (write_cb == NULL) {
/*
* The caller did not provide an alternate write_cb callback
* function, so use the default one. malloc_write() is an
* inline function, so use malloc_message() directly here.
*/
write_cb = (je_malloc_message != NULL) ? je_malloc_message :
wrtmessage;
}
malloc_vsnprintf(buf, sizeof(buf), format, ap);
write_cb(cbopaque, buf);
}
/*
* Print to a callback function in such a way as to (hopefully) avoid memory
* allocation.
*/
JEMALLOC_FORMAT_PRINTF(3, 4)
void
malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format, ...) {
va_list ap;
va_start(ap, format);
malloc_vcprintf(write_cb, cbopaque, format, ap);
va_end(ap);
}
/* Print to stderr in such a way as to avoid memory allocation. */
JEMALLOC_FORMAT_PRINTF(1, 2)
void
malloc_printf(const char *format, ...) {
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
/*
* Restore normal assertion macros, in order to make it possible to compile all
* C files as a single concatenation.
*/
#undef assert
#undef not_reached
#undef not_implemented
#undef assert_not_implemented
#include "jemalloc/internal/assert.h"

228
BeefRT/JEMalloc/src/mutex.c Normal file

@ -0,0 +1,228 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif
/*
* Based on benchmark results, a fixed spin with this amount of retries works
* well for our critical sections.
*/
int64_t opt_mutex_max_spin = 600;
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif
/******************************************************************************/
/*
* We intercept pthread_create() calls in order to toggle isthreaded if the
* process goes multi-threaded.
*/
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
void *__restrict arg) {
return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif
/******************************************************************************/
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
#endif
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
mutex_prof_data_t *data = &mutex->prof_data;
nstime_t before;
if (ncpus == 1) {
goto label_spin_done;
}
int cnt = 0;
do {
spin_cpu_spinwait();
if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
&& !malloc_mutex_trylock_final(mutex)) {
data->n_spin_acquired++;
return;
}
} while (cnt++ < opt_mutex_max_spin || opt_mutex_max_spin == -1);
if (!config_stats) {
/* Only spinning is useful when stats are off. */
malloc_mutex_lock_final(mutex);
return;
}
label_spin_done:
nstime_init_update(&before);
/* Copy before to after to avoid clock skews. */
nstime_t after;
nstime_copy(&after, &before);
uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
ATOMIC_RELAXED) + 1;
/* One last try, as the above two calls may take quite a few cycles. */
if (!malloc_mutex_trylock_final(mutex)) {
atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
data->n_spin_acquired++;
return;
}
/* True slow path. */
malloc_mutex_lock_final(mutex);
/* Update more slow-path only counters. */
atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
nstime_update(&after);
nstime_t delta;
nstime_copy(&delta, &after);
nstime_subtract(&delta, &before);
data->n_wait_times++;
nstime_add(&data->tot_wait_time, &delta);
if (nstime_compare(&data->max_wait_time, &delta) < 0) {
nstime_copy(&data->max_wait_time, &delta);
}
if (n_thds > data->max_n_thds) {
data->max_n_thds = n_thds;
}
}
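/*
 * A distilled, standalone sketch of the spin-then-block pattern implemented
 * above, using only C11 atomics and pthreads.  The names (example_lock_t,
 * EXAMPLE_SPIN_MAX) are illustrative and not part of jemalloc; the stats
 * accounting done by malloc_mutex_lock_slow is omitted.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define EXAMPLE_SPIN_MAX 600

typedef struct {
	atomic_bool locked;	/* cheap hint checked while spinning */
	pthread_mutex_t mtx;	/* the lock actually blocked on */
} example_lock_t;

static void
example_lock_init(example_lock_t *l) {
	atomic_init(&l->locked, false);
	pthread_mutex_init(&l->mtx, NULL);
}

static void
example_lock_acquire(example_lock_t *l) {
	for (int i = 0; i < EXAMPLE_SPIN_MAX; i++) {
		if (!atomic_load_explicit(&l->locked, memory_order_relaxed) &&
		    pthread_mutex_trylock(&l->mtx) == 0) {
			atomic_store_explicit(&l->locked, true,
			    memory_order_relaxed);
			return;	/* acquired while spinning */
		}
	}
	pthread_mutex_lock(&l->mtx);	/* true slow path: block */
	atomic_store_explicit(&l->locked, true, memory_order_relaxed);
}

static void
example_lock_release(example_lock_t *l) {
	atomic_store_explicit(&l->locked, false, memory_order_relaxed);
	pthread_mutex_unlock(&l->mtx);
}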
static void
mutex_prof_data_init(mutex_prof_data_t *data) {
memset(data, 0, sizeof(mutex_prof_data_t));
nstime_init_zero(&data->max_wait_time);
nstime_init_zero(&data->tot_wait_time);
data->prev_owner = NULL;
}
void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
malloc_mutex_assert_owner(tsdn, mutex);
mutex_prof_data_init(&mutex->prof_data);
}
static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
const witness_t *witness2, void *mutex2) {
assert(mutex1 != NULL);
assert(mutex2 != NULL);
uintptr_t mu1int = (uintptr_t)mutex1;
uintptr_t mu2int = (uintptr_t)mutex2;
if (mu1int < mu2int) {
return -1;
} else if (mu1int == mu2int) {
return 0;
} else {
return 1;
}
}
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
InitializeSRWLock(&mutex->lock);
# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT)) {
return true;
}
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
if (postpone_init) {
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
bootstrap_calloc) != 0) {
return true;
}
}
#else
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0) {
return true;
}
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return true;
}
pthread_mutexattr_destroy(&attr);
#endif
if (config_debug) {
mutex->lock_order = lock_order;
if (lock_order == malloc_mutex_address_ordered) {
witness_init(&mutex->witness, name, rank,
mutex_addr_comp, mutex);
} else {
witness_init(&mutex->witness, name, rank, NULL, NULL);
}
}
return false;
}
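/*
 * Usage sketch (illustrative only): initializing and using a jemalloc mutex.
 * example_mtx and the functions below are hypothetical; the witness rank is
 * borrowed from the extent-grow mutex purely for illustration -- a real
 * mutex defines its own rank.
 */
static malloc_mutex_t example_mtx;

static bool
example_mtx_boot(void) {
	return malloc_mutex_init(&example_mtx, "example",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive);
}

static void
example_mtx_use(tsdn_t *tsdn) {
	malloc_mutex_lock(tsdn, &example_mtx);
	/* ... critical section ... */
	malloc_mutex_unlock(tsdn, &example_mtx);
}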
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
malloc_mutex_lock(tsdn, mutex);
}
void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
malloc_mutex_unlock(tsdn, mutex);
}
void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
malloc_mutex_unlock(tsdn, mutex);
#else
if (malloc_mutex_init(mutex, mutex->witness.name,
mutex->witness.rank, mutex->lock_order)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort) {
abort();
}
}
#endif
}
bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
bootstrap_calloc) != 0) {
return true;
}
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
return false;
}

289
BeefRT/JEMalloc/src/nstime.c Normal file
View file

@ -0,0 +1,289 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/assert.h"
#define BILLION UINT64_C(1000000000)
#define MILLION UINT64_C(1000000)
static void
nstime_set_initialized(nstime_t *time) {
#ifdef JEMALLOC_DEBUG
time->magic = NSTIME_MAGIC;
#endif
}
static void
nstime_assert_initialized(const nstime_t *time) {
#ifdef JEMALLOC_DEBUG
/*
* Some parts (e.g. stats) rely on memset to zero initialize. Treat
* these as valid initialization.
*/
assert(time->magic == NSTIME_MAGIC ||
(time->magic == 0 && time->ns == 0));
#endif
}
static void
nstime_pair_assert_initialized(const nstime_t *t1, const nstime_t *t2) {
nstime_assert_initialized(t1);
nstime_assert_initialized(t2);
}
static void
nstime_initialize_operand(nstime_t *time) {
/*
* Operations like nstime_add may be called on an operand that is merely
* zero-initialized (covered by the assert below); fully initialize it
* before changing it to a non-zero value.
*/
nstime_assert_initialized(time);
nstime_set_initialized(time);
}
void
nstime_init(nstime_t *time, uint64_t ns) {
nstime_set_initialized(time);
time->ns = ns;
}
void
nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
nstime_set_initialized(time);
time->ns = sec * BILLION + nsec;
}
uint64_t
nstime_ns(const nstime_t *time) {
nstime_assert_initialized(time);
return time->ns;
}
uint64_t
nstime_msec(const nstime_t *time) {
nstime_assert_initialized(time);
return time->ns / MILLION;
}
uint64_t
nstime_sec(const nstime_t *time) {
nstime_assert_initialized(time);
return time->ns / BILLION;
}
uint64_t
nstime_nsec(const nstime_t *time) {
nstime_assert_initialized(time);
return time->ns % BILLION;
}
void
nstime_copy(nstime_t *time, const nstime_t *source) {
/* Source is required to be initialized. */
nstime_assert_initialized(source);
*time = *source;
nstime_assert_initialized(time);
}
int
nstime_compare(const nstime_t *a, const nstime_t *b) {
nstime_pair_assert_initialized(a, b);
return (a->ns > b->ns) - (a->ns < b->ns);
}
void
nstime_add(nstime_t *time, const nstime_t *addend) {
nstime_pair_assert_initialized(time, addend);
assert(UINT64_MAX - time->ns >= addend->ns);
nstime_initialize_operand(time);
time->ns += addend->ns;
}
void
nstime_iadd(nstime_t *time, uint64_t addend) {
nstime_assert_initialized(time);
assert(UINT64_MAX - time->ns >= addend);
nstime_initialize_operand(time);
time->ns += addend;
}
void
nstime_subtract(nstime_t *time, const nstime_t *subtrahend) {
nstime_pair_assert_initialized(time, subtrahend);
assert(nstime_compare(time, subtrahend) >= 0);
/* No initialize operand -- subtraction must be initialized. */
time->ns -= subtrahend->ns;
}
void
nstime_isubtract(nstime_t *time, uint64_t subtrahend) {
nstime_assert_initialized(time);
assert(time->ns >= subtrahend);
/* No initialize operand -- subtraction must be initialized. */
time->ns -= subtrahend;
}
void
nstime_imultiply(nstime_t *time, uint64_t multiplier) {
nstime_assert_initialized(time);
assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
nstime_initialize_operand(time);
time->ns *= multiplier;
}
void
nstime_idivide(nstime_t *time, uint64_t divisor) {
nstime_assert_initialized(time);
assert(divisor != 0);
nstime_initialize_operand(time);
time->ns /= divisor;
}
uint64_t
nstime_divide(const nstime_t *time, const nstime_t *divisor) {
nstime_pair_assert_initialized(time, divisor);
assert(divisor->ns != 0);
/* No initialize operand -- *time itself remains unchanged. */
return time->ns / divisor->ns;
}
/* Returns time since *past, w/o updating *past. */
uint64_t
nstime_ns_since(const nstime_t *past) {
nstime_assert_initialized(past);
nstime_t now;
nstime_copy(&now, past);
nstime_update(&now);
assert(nstime_compare(&now, past) >= 0);
return now.ns - past->ns;
}
#ifdef _WIN32
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
FILETIME ft;
uint64_t ticks_100ns;
GetSystemTimeAsFileTime(&ft);
ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
nstime_init(time, ticks_100ns * 100);
}
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME)
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
nstime_init(time, mach_absolute_time());
}
#else
# define NSTIME_MONOTONIC false
static void
nstime_get(nstime_t *time) {
struct timeval tv;
gettimeofday(&tv, NULL);
nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
}
#endif
static bool
nstime_monotonic_impl(void) {
return NSTIME_MONOTONIC;
#undef NSTIME_MONOTONIC
}
nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl;
prof_time_res_t opt_prof_time_res =
prof_time_res_default;
const char *prof_time_res_mode_names[] = {
"default",
"high",
};
static void
nstime_get_realtime(nstime_t *time) {
#if defined(JEMALLOC_HAVE_CLOCK_REALTIME) && !defined(_WIN32)
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
#else
unreachable();
#endif
}
static void
nstime_prof_update_impl(nstime_t *time) {
nstime_t old_time;
nstime_copy(&old_time, time);
if (opt_prof_time_res == prof_time_res_high) {
nstime_get_realtime(time);
} else {
nstime_get(time);
}
}
nstime_prof_update_t *JET_MUTABLE nstime_prof_update = nstime_prof_update_impl;
static void
nstime_update_impl(nstime_t *time) {
nstime_t old_time;
nstime_copy(&old_time, time);
nstime_get(time);
/* Handle non-monotonic clocks. */
if (unlikely(nstime_compare(&old_time, time) > 0)) {
nstime_copy(time, &old_time);
}
}
nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl;
void
nstime_init_update(nstime_t *time) {
nstime_init_zero(time);
nstime_update(time);
}
void
nstime_prof_init_update(nstime_t *time) {
nstime_init_zero(time);
nstime_prof_update(time);
}
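/*
 * Usage sketch (illustrative only): timing a region of code with the API
 * defined in this file.  example_time_region is a hypothetical helper.
 */
static uint64_t
example_time_region(void (*work)(void)) {
	nstime_t start;
	nstime_init_update(&start);	/* zero-initialize, then read the clock */
	work();
	/* Elapsed nanoseconds; start itself is left unchanged. */
	return nstime_ns_since(&start);
}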

277
BeefRT/JEMalloc/src/pa.c Normal file
View file

@ -0,0 +1,277 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/hpa.h"
static void
pa_nactive_add(pa_shard_t *shard, size_t add_pages) {
atomic_fetch_add_zu(&shard->nactive, add_pages, ATOMIC_RELAXED);
}
static void
pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) {
assert(atomic_load_zu(&shard->nactive, ATOMIC_RELAXED) >= sub_pages);
atomic_fetch_sub_zu(&shard->nactive, sub_pages, ATOMIC_RELAXED);
}
bool
pa_central_init(pa_central_t *central, base_t *base, bool hpa,
hpa_hooks_t *hpa_hooks) {
bool err;
if (hpa) {
err = hpa_central_init(&central->hpa, base, hpa_hooks);
if (err) {
return true;
}
}
return false;
}
bool
pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
malloc_mutex_t *stats_mtx, nstime_t *cur_time,
size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
ssize_t muzzy_decay_ms) {
/* This will change eventually, but for now it should hold. */
assert(base_ind_get(base) == ind);
if (edata_cache_init(&shard->edata_cache, base)) {
return true;
}
if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
&stats->pac_stats, stats_mtx)) {
return true;
}
shard->ind = ind;
shard->ever_used_hpa = false;
atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
shard->stats_mtx = stats_mtx;
shard->stats = stats;
memset(shard->stats, 0, sizeof(*shard->stats));
shard->central = central;
shard->emap = emap;
shard->base = base;
return false;
}
bool
pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts) {
if (hpa_shard_init(&shard->hpa_shard, &shard->central->hpa, shard->emap,
shard->base, &shard->edata_cache, shard->ind, hpa_opts)) {
return true;
}
if (sec_init(tsdn, &shard->hpa_sec, shard->base, &shard->hpa_shard.pai,
hpa_sec_opts)) {
return true;
}
shard->ever_used_hpa = true;
atomic_store_b(&shard->use_hpa, true, ATOMIC_RELAXED);
return false;
}
void
pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard) {
atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
if (shard->ever_used_hpa) {
sec_disable(tsdn, &shard->hpa_sec);
hpa_shard_disable(tsdn, &shard->hpa_shard);
}
}
void
pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard) {
atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
if (shard->ever_used_hpa) {
sec_flush(tsdn, &shard->hpa_sec);
}
}
static bool
pa_shard_uses_hpa(pa_shard_t *shard) {
return atomic_load_b(&shard->use_hpa, ATOMIC_RELAXED);
}
void
pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) {
pac_destroy(tsdn, &shard->pac);
if (shard->ever_used_hpa) {
sec_flush(tsdn, &shard->hpa_sec);
hpa_shard_disable(tsdn, &shard->hpa_shard);
}
}
static pai_t *
pa_get_pai(pa_shard_t *shard, edata_t *edata) {
return (edata_pai_get(edata) == EXTENT_PAI_PAC
? &shard->pac.pai : &shard->hpa_sec.pai);
}
edata_t *
pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
bool slab, szind_t szind, bool zero, bool guarded,
bool *deferred_work_generated) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
assert(!guarded || alignment <= PAGE);
edata_t *edata = NULL;
if (!guarded && pa_shard_uses_hpa(shard)) {
edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
zero, /* guarded */ false, slab, deferred_work_generated);
}
/*
* Fall back to the PAC if the HPA is off or couldn't serve the given
* allocation request.
*/
if (edata == NULL) {
edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
guarded, slab, deferred_work_generated);
}
if (edata != NULL) {
assert(edata_size_get(edata) == size);
pa_nactive_add(shard, size >> LG_PAGE);
emap_remap(tsdn, shard->emap, edata, szind, slab);
edata_szind_set(edata, szind);
edata_slab_set(edata, slab);
if (slab && (size > 2 * PAGE)) {
emap_register_interior(tsdn, shard->emap, edata, szind);
}
assert(edata_arena_ind_get(edata) == shard->ind);
}
return edata;
}
bool
pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated) {
assert(new_size > old_size);
assert(edata_size_get(edata) == old_size);
assert((new_size & PAGE_MASK) == 0);
if (edata_guarded_get(edata)) {
return true;
}
size_t expand_amount = new_size - old_size;
pai_t *pai = pa_get_pai(shard, edata);
bool error = pai_expand(tsdn, pai, edata, old_size, new_size, zero,
deferred_work_generated);
if (error) {
return true;
}
pa_nactive_add(shard, expand_amount >> LG_PAGE);
edata_szind_set(edata, szind);
emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false);
return false;
}
bool
pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
size_t new_size, szind_t szind, bool *deferred_work_generated) {
assert(new_size < old_size);
assert(edata_size_get(edata) == old_size);
assert((new_size & PAGE_MASK) == 0);
if (edata_guarded_get(edata)) {
return true;
}
size_t shrink_amount = old_size - new_size;
pai_t *pai = pa_get_pai(shard, edata);
bool error = pai_shrink(tsdn, pai, edata, old_size, new_size,
deferred_work_generated);
if (error) {
return true;
}
pa_nactive_sub(shard, shrink_amount >> LG_PAGE);
edata_szind_set(edata, szind);
emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false);
return false;
}
void
pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
bool *deferred_work_generated) {
emap_remap(tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false);
if (edata_slab_get(edata)) {
emap_deregister_interior(tsdn, shard->emap, edata);
/*
* The slab state of the extent isn't cleared. It may be used
* by the pai implementation, e.g. to make caching decisions.
*/
}
edata_addr_set(edata, edata_base_get(edata));
edata_szind_set(edata, SC_NSIZES);
pa_nactive_sub(shard, edata_size_get(edata) >> LG_PAGE);
pai_t *pai = pa_get_pai(shard, edata);
pai_dalloc(tsdn, pai, edata, deferred_work_generated);
}
bool
pa_shard_retain_grow_limit_get_set(tsdn_t *tsdn, pa_shard_t *shard,
size_t *old_limit, size_t *new_limit) {
return pac_retain_grow_limit_get_set(tsdn, &shard->pac, old_limit,
new_limit);
}
bool
pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
return pac_decay_ms_set(tsdn, &shard->pac, state, decay_ms, eagerness);
}
ssize_t
pa_decay_ms_get(pa_shard_t *shard, extent_state_t state) {
return pac_decay_ms_get(&shard->pac, state);
}
void
pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
bool deferral_allowed) {
if (pa_shard_uses_hpa(shard)) {
hpa_shard_set_deferral_allowed(tsdn, &shard->hpa_shard,
deferral_allowed);
}
}
void
pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
if (pa_shard_uses_hpa(shard)) {
hpa_shard_do_deferred_work(tsdn, &shard->hpa_shard);
}
}
/*
* Get time until next deferred work ought to happen. If there are multiple
* things that have been deferred, this function calculates the time until
* the soonest of those things.
*/
uint64_t
pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
uint64_t time = pai_time_until_deferred_work(tsdn, &shard->pac.pai);
if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
return time;
}
if (pa_shard_uses_hpa(shard)) {
uint64_t hpa =
pai_time_until_deferred_work(tsdn, &shard->hpa_shard.pai);
if (hpa < time) {
time = hpa;
}
}
return time;
}

191
BeefRT/JEMalloc/src/pa_extra.c Normal file
View file

@ -0,0 +1,191 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
/*
* This file is logically part of the PA module. While pa.c contains the core
* allocator functionality, this file contains boring integration functionality;
* things like the pre- and post- fork handlers, and stats merging for CTL
* refreshes.
*/
void
pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
malloc_mutex_prefork(tsdn, &shard->pac.decay_dirty.mtx);
malloc_mutex_prefork(tsdn, &shard->pac.decay_muzzy.mtx);
}
void
pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
if (shard->ever_used_hpa) {
sec_prefork2(tsdn, &shard->hpa_sec);
}
}
void
pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard) {
malloc_mutex_prefork(tsdn, &shard->pac.grow_mtx);
if (shard->ever_used_hpa) {
hpa_shard_prefork3(tsdn, &shard->hpa_shard);
}
}
void
pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard) {
ecache_prefork(tsdn, &shard->pac.ecache_dirty);
ecache_prefork(tsdn, &shard->pac.ecache_muzzy);
ecache_prefork(tsdn, &shard->pac.ecache_retained);
if (shard->ever_used_hpa) {
hpa_shard_prefork4(tsdn, &shard->hpa_shard);
}
}
void
pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard) {
edata_cache_prefork(tsdn, &shard->edata_cache);
}
void
pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
edata_cache_postfork_parent(tsdn, &shard->edata_cache);
ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
malloc_mutex_postfork_parent(tsdn, &shard->pac.grow_mtx);
malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
if (shard->ever_used_hpa) {
sec_postfork_parent(tsdn, &shard->hpa_sec);
hpa_shard_postfork_parent(tsdn, &shard->hpa_shard);
}
}
void
pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
edata_cache_postfork_child(tsdn, &shard->edata_cache);
ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
malloc_mutex_postfork_child(tsdn, &shard->pac.grow_mtx);
malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
if (shard->ever_used_hpa) {
sec_postfork_child(tsdn, &shard->hpa_sec);
hpa_shard_postfork_child(tsdn, &shard->hpa_shard);
}
}
void
pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,
size_t *nmuzzy) {
*nactive += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
*ndirty += ecache_npages_get(&shard->pac.ecache_dirty);
*nmuzzy += ecache_npages_get(&shard->pac.ecache_muzzy);
}
void
pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
size_t *resident) {
cassert(config_stats);
pa_shard_stats_out->pac_stats.retained +=
ecache_npages_get(&shard->pac.ecache_retained) << LG_PAGE;
pa_shard_stats_out->edata_avail += atomic_load_zu(
&shard->edata_cache.count, ATOMIC_RELAXED);
size_t resident_pgs = 0;
resident_pgs += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
resident_pgs += ecache_npages_get(&shard->pac.ecache_dirty);
*resident += (resident_pgs << LG_PAGE);
/* Dirty decay stats */
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_dirty.npurge,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_dirty.npurge));
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_dirty.nmadvise,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_dirty.nmadvise));
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_dirty.purged,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_dirty.purged));
/* Muzzy decay stats */
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_muzzy.npurge,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_muzzy.npurge));
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_muzzy.nmadvise,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_muzzy.nmadvise));
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_muzzy.purged,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_muzzy.purged));
atomic_load_add_store_zu(&pa_shard_stats_out->pac_stats.abandoned_vm,
atomic_load_zu(&shard->pac.stats->abandoned_vm, ATOMIC_RELAXED));
for (pszind_t i = 0; i < SC_NPSIZES; i++) {
size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
retained_bytes;
dirty = ecache_nextents_get(&shard->pac.ecache_dirty, i);
muzzy = ecache_nextents_get(&shard->pac.ecache_muzzy, i);
retained = ecache_nextents_get(&shard->pac.ecache_retained, i);
dirty_bytes = ecache_nbytes_get(&shard->pac.ecache_dirty, i);
muzzy_bytes = ecache_nbytes_get(&shard->pac.ecache_muzzy, i);
retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained,
i);
estats_out[i].ndirty = dirty;
estats_out[i].nmuzzy = muzzy;
estats_out[i].nretained = retained;
estats_out[i].dirty_bytes = dirty_bytes;
estats_out[i].muzzy_bytes = muzzy_bytes;
estats_out[i].retained_bytes = retained_bytes;
}
if (shard->ever_used_hpa) {
hpa_shard_stats_merge(tsdn, &shard->hpa_shard, hpa_stats_out);
sec_stats_merge(tsdn, &shard->hpa_sec, sec_stats_out);
}
}
static void
pa_shard_mtx_stats_read_single(tsdn_t *tsdn, mutex_prof_data_t *mutex_prof_data,
malloc_mutex_t *mtx, int ind) {
malloc_mutex_lock(tsdn, mtx);
malloc_mutex_prof_read(tsdn, &mutex_prof_data[ind], mtx);
malloc_mutex_unlock(tsdn, mtx);
}
void
pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]) {
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->edata_cache.mtx, arena_prof_mutex_extent_avail);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->pac.ecache_dirty.mtx, arena_prof_mutex_extents_dirty);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->pac.ecache_muzzy.mtx, arena_prof_mutex_extents_muzzy);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->pac.ecache_retained.mtx, arena_prof_mutex_extents_retained);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->pac.decay_dirty.mtx, arena_prof_mutex_decay_dirty);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->pac.decay_muzzy.mtx, arena_prof_mutex_decay_muzzy);
if (shard->ever_used_hpa) {
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->hpa_shard.mtx, arena_prof_mutex_hpa_shard);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->hpa_shard.grow_mtx,
arena_prof_mutex_hpa_shard_grow);
sec_mutex_stats_read(tsdn, &shard->hpa_sec,
&mutex_prof_data[arena_prof_mutex_hpa_sec]);
}
}

587
BeefRT/JEMalloc/src/pac.c Normal file
View file

@ -0,0 +1,587 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/pac.h"
#include "jemalloc/internal/san.h"
static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
size_t alignment, bool zero, bool guarded, bool frequent_reuse,
bool *deferred_work_generated);
static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated);
static void pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated);
static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
static inline void
pac_decay_data_get(pac_t *pac, extent_state_t state,
decay_t **r_decay, pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) {
switch(state) {
case extent_state_dirty:
*r_decay = &pac->decay_dirty;
*r_decay_stats = &pac->stats->decay_dirty;
*r_ecache = &pac->ecache_dirty;
return;
case extent_state_muzzy:
*r_decay = &pac->decay_muzzy;
*r_decay_stats = &pac->stats->decay_muzzy;
*r_ecache = &pac->ecache_muzzy;
return;
default:
unreachable();
}
}
bool
pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
edata_cache_t *edata_cache, nstime_t *cur_time,
size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) {
unsigned ind = base_ind_get(base);
/*
* Delay coalescing for dirty extents despite the disruptive effect on
* memory layout for best-fit extent allocation, since cached extents
* are likely to be reused soon after deallocation, and the cost of
* merging/splitting extents is non-trivial.
*/
if (ecache_init(tsdn, &pac->ecache_dirty, extent_state_dirty, ind,
/* delay_coalesce */ true)) {
return true;
}
/*
* Coalesce muzzy extents immediately, because operations on them are in
* the critical path much less often than for dirty extents.
*/
if (ecache_init(tsdn, &pac->ecache_muzzy, extent_state_muzzy, ind,
/* delay_coalesce */ false)) {
return true;
}
/*
* Coalesce retained extents immediately, in part because they will
* never be evicted (and therefore there's no opportunity for delayed
* coalescing), but also because operations on retained extents are not
* in the critical path.
*/
if (ecache_init(tsdn, &pac->ecache_retained, extent_state_retained,
ind, /* delay_coalesce */ false)) {
return true;
}
exp_grow_init(&pac->exp_grow);
if (malloc_mutex_init(&pac->grow_mtx, "extent_grow",
WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
return true;
}
atomic_store_zu(&pac->oversize_threshold, pac_oversize_threshold,
ATOMIC_RELAXED);
if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
return true;
}
if (decay_init(&pac->decay_muzzy, cur_time, muzzy_decay_ms)) {
return true;
}
if (san_bump_alloc_init(&pac->sba)) {
return true;
}
pac->base = base;
pac->emap = emap;
pac->edata_cache = edata_cache;
pac->stats = pac_stats;
pac->stats_mtx = stats_mtx;
atomic_store_zu(&pac->extent_sn_next, 0, ATOMIC_RELAXED);
pac->pai.alloc = &pac_alloc_impl;
pac->pai.alloc_batch = &pai_alloc_batch_default;
pac->pai.expand = &pac_expand_impl;
pac->pai.shrink = &pac_shrink_impl;
pac->pai.dalloc = &pac_dalloc_impl;
pac->pai.dalloc_batch = &pai_dalloc_batch_default;
pac->pai.time_until_deferred_work = &pac_time_until_deferred_work;
return false;
}
static inline bool
pac_may_have_muzzy(pac_t *pac) {
return pac_decay_ms_get(pac, extent_state_muzzy) != 0;
}
static edata_t *
pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
size_t alignment, bool zero, bool guarded) {
assert(!guarded || alignment <= PAGE);
edata_t *edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
NULL, size, alignment, zero, guarded);
if (edata == NULL && pac_may_have_muzzy(pac)) {
edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
NULL, size, alignment, zero, guarded);
}
if (edata == NULL) {
edata = ecache_alloc_grow(tsdn, pac, ehooks,
&pac->ecache_retained, NULL, size, alignment, zero,
guarded);
if (config_stats && edata != NULL) {
atomic_fetch_add_zu(&pac->stats->pac_mapped, size,
ATOMIC_RELAXED);
}
}
return edata;
}
static edata_t *
pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
size_t alignment, bool zero, bool frequent_reuse) {
assert(alignment <= PAGE);
edata_t *edata;
if (san_bump_enabled() && frequent_reuse) {
edata = san_bump_alloc(tsdn, &pac->sba, pac, ehooks, size,
zero);
} else {
size_t size_with_guards = san_two_side_guarded_sz(size);
/* Alloc a non-guarded extent first.*/
edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
/* alignment */ PAGE, zero, /* guarded */ false);
if (edata != NULL) {
/* Add guards around it. */
assert(edata_size_get(edata) == size_with_guards);
san_guard_pages_two_sided(tsdn, ehooks, edata,
pac->emap, true);
}
}
assert(edata == NULL || (edata_guarded_get(edata) &&
edata_size_get(edata) == size));
return edata;
}
static edata_t *
pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
bool zero, bool guarded, bool frequent_reuse,
bool *deferred_work_generated) {
pac_t *pac = (pac_t *)self;
ehooks_t *ehooks = pac_ehooks_get(pac);
edata_t *edata = NULL;
/*
* The condition is an optimization: guarded allocations that are not
* frequently reused are never put in the ecache, and pac_alloc_real does
* not grow retained memory for guarded allocations, so for such
* allocations pac_alloc_real would always return NULL.
*/
if (!guarded || frequent_reuse) {
edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
zero, guarded);
}
if (edata == NULL && guarded) {
/* No cached guarded extents; creating a new one. */
edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size,
alignment, zero, frequent_reuse);
}
return edata;
}
static bool
pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
size_t new_size, bool zero, bool *deferred_work_generated) {
pac_t *pac = (pac_t *)self;
ehooks_t *ehooks = pac_ehooks_get(pac);
size_t mapped_add = 0;
size_t expand_amount = new_size - old_size;
if (ehooks_merge_will_fail(ehooks)) {
return true;
}
edata_t *trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
edata, expand_amount, PAGE, zero, /* guarded */ false);
if (trail == NULL) {
trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
edata, expand_amount, PAGE, zero, /* guarded */ false);
}
if (trail == NULL) {
trail = ecache_alloc_grow(tsdn, pac, ehooks,
&pac->ecache_retained, edata, expand_amount, PAGE, zero,
/* guarded */ false);
mapped_add = expand_amount;
}
if (trail == NULL) {
return true;
}
if (extent_merge_wrapper(tsdn, pac, ehooks, edata, trail)) {
extent_dalloc_wrapper(tsdn, pac, ehooks, trail);
return true;
}
if (config_stats && mapped_add > 0) {
atomic_fetch_add_zu(&pac->stats->pac_mapped, mapped_add,
ATOMIC_RELAXED);
}
return false;
}
static bool
pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
size_t new_size, bool *deferred_work_generated) {
pac_t *pac = (pac_t *)self;
ehooks_t *ehooks = pac_ehooks_get(pac);
size_t shrink_amount = old_size - new_size;
if (ehooks_split_will_fail(ehooks)) {
return true;
}
edata_t *trail = extent_split_wrapper(tsdn, pac, ehooks, edata,
new_size, shrink_amount, /* holding_core_locks */ false);
if (trail == NULL) {
return true;
}
ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, trail);
*deferred_work_generated = true;
return false;
}
static void
pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated) {
pac_t *pac = (pac_t *)self;
ehooks_t *ehooks = pac_ehooks_get(pac);
if (edata_guarded_get(edata)) {
/*
* Because cached guarded extents do exact fit only, large
* guarded extents are restored on dalloc eagerly (otherwise
* they will not be reused efficiently). Slab sizes have a
* limited number of size classes, and tend to cycle faster.
*
* In the case where coalesce is restrained (VirtualFree on
* Windows), guarded extents are also not cached -- otherwise
* during arena destroy / reset, the retained extents would not
* be whole regions (i.e. they are split between regular and
* guarded).
*/
if (!edata_slab_get(edata) || !maps_coalesce) {
assert(edata_size_get(edata) >= SC_LARGE_MINCLASS ||
!maps_coalesce);
san_unguard_pages_two_sided(tsdn, ehooks, edata,
pac->emap);
}
}
ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, edata);
/* Purging of deallocated pages is deferred */
*deferred_work_generated = true;
}
static inline uint64_t
pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) {
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
/* Use minimal interval if decay is contended. */
return BACKGROUND_THREAD_DEFERRED_MIN;
}
uint64_t result = decay_ns_until_purge(decay, npages,
ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD);
malloc_mutex_unlock(tsdn, &decay->mtx);
return result;
}
static uint64_t
pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
uint64_t time;
pac_t *pac = (pac_t *)self;
time = pac_ns_until_purge(tsdn,
&pac->decay_dirty,
ecache_npages_get(&pac->ecache_dirty));
if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
return time;
}
uint64_t muzzy = pac_ns_until_purge(tsdn,
&pac->decay_muzzy,
ecache_npages_get(&pac->ecache_muzzy));
if (muzzy < time) {
time = muzzy;
}
return time;
}
bool
pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
size_t *new_limit) {
pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
if (new_limit != NULL) {
size_t limit = *new_limit;
/* Grow no more than the new limit. */
if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
return true;
}
}
malloc_mutex_lock(tsdn, &pac->grow_mtx);
if (old_limit != NULL) {
*old_limit = sz_pind2sz(pac->exp_grow.limit);
}
if (new_limit != NULL) {
pac->exp_grow.limit = new_ind;
}
malloc_mutex_unlock(tsdn, &pac->grow_mtx);
return false;
}
static size_t
pac_stash_decayed(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
size_t npages_limit, size_t npages_decay_max,
edata_list_inactive_t *result) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
ehooks_t *ehooks = pac_ehooks_get(pac);
/* Stash extents according to npages_limit. */
size_t nstashed = 0;
while (nstashed < npages_decay_max) {
edata_t *edata = ecache_evict(tsdn, pac, ehooks, ecache,
npages_limit);
if (edata == NULL) {
break;
}
edata_list_inactive_append(result, edata);
nstashed += edata_size_get(edata) >> LG_PAGE;
}
return nstashed;
}
static size_t
pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
edata_list_inactive_t *decay_extents) {
bool err;
size_t nmadvise = 0;
size_t nunmapped = 0;
size_t npurged = 0;
ehooks_t *ehooks = pac_ehooks_get(pac);
bool try_muzzy = !fully_decay
&& pac_decay_ms_get(pac, extent_state_muzzy) != 0;
for (edata_t *edata = edata_list_inactive_first(decay_extents); edata !=
NULL; edata = edata_list_inactive_first(decay_extents)) {
edata_list_inactive_remove(decay_extents, edata);
size_t size = edata_size_get(edata);
size_t npages = size >> LG_PAGE;
nmadvise++;
npurged += npages;
switch (ecache->state) {
case extent_state_active:
not_reached();
case extent_state_dirty:
if (try_muzzy) {
err = extent_purge_lazy_wrapper(tsdn, ehooks,
edata, /* offset */ 0, size);
if (!err) {
ecache_dalloc(tsdn, pac, ehooks,
&pac->ecache_muzzy, edata);
break;
}
}
JEMALLOC_FALLTHROUGH;
case extent_state_muzzy:
extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
nunmapped += npages;
break;
case extent_state_retained:
default:
not_reached();
}
}
if (config_stats) {
LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
&decay_stats->npurge, 1);
locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
&decay_stats->nmadvise, nmadvise);
locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
&decay_stats->purged, npurged);
LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
atomic_fetch_sub_zu(&pac->stats->pac_mapped,
nunmapped << LG_PAGE, ATOMIC_RELAXED);
}
return npurged;
}
/*
* npages_limit: Decay at most npages_decay_max pages without violating the
* invariant: (ecache_npages_get(ecache) >= npages_limit). We need an upper
* bound on number of pages in order to prevent unbounded growth (namely in
* stashed), otherwise unbounded new pages could be added to extents during the
* current decay run, so that the purging thread never finishes.
*/
static void
pac_decay_to_limit(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
size_t npages_limit, size_t npages_decay_max) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 1);
if (decay->purging || npages_decay_max == 0) {
return;
}
decay->purging = true;
malloc_mutex_unlock(tsdn, &decay->mtx);
edata_list_inactive_t decay_extents;
edata_list_inactive_init(&decay_extents);
size_t npurge = pac_stash_decayed(tsdn, pac, ecache, npages_limit,
npages_decay_max, &decay_extents);
if (npurge != 0) {
size_t npurged = pac_decay_stashed(tsdn, pac, decay,
decay_stats, ecache, fully_decay, &decay_extents);
assert(npurged == npurge);
}
malloc_mutex_lock(tsdn, &decay->mtx);
decay->purging = false;
}
void
pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) {
malloc_mutex_assert_owner(tsdn, &decay->mtx);
pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache, fully_decay,
/* npages_limit */ 0, ecache_npages_get(ecache));
}
static void
pac_decay_try_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
pac_decay_stats_t *decay_stats, ecache_t *ecache,
size_t current_npages, size_t npages_limit) {
if (current_npages > npages_limit) {
pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache,
/* fully_decay */ false, npages_limit,
current_npages - npages_limit);
}
}
bool
pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
pac_decay_stats_t *decay_stats, ecache_t *ecache,
pac_purge_eagerness_t eagerness) {
malloc_mutex_assert_owner(tsdn, &decay->mtx);
/* Purge all or nothing if the option is disabled. */
ssize_t decay_ms = decay_ms_read(decay);
if (decay_ms <= 0) {
if (decay_ms == 0) {
pac_decay_to_limit(tsdn, pac, decay, decay_stats,
ecache, /* fully_decay */ false,
/* npages_limit */ 0, ecache_npages_get(ecache));
}
return false;
}
/*
* If the deadline has been reached, advance to the current epoch and
* purge to the new limit if necessary. Note that dirty pages created
* during the current epoch are not subject to purge until a future
* epoch, so as a result purging only happens during epoch advances, or
* being triggered by background threads (scheduled event).
*/
nstime_t time;
nstime_init_update(&time);
size_t npages_current = ecache_npages_get(ecache);
bool epoch_advanced = decay_maybe_advance_epoch(decay, &time,
npages_current);
if (eagerness == PAC_PURGE_ALWAYS
|| (epoch_advanced && eagerness == PAC_PURGE_ON_EPOCH_ADVANCE)) {
size_t npages_limit = decay_npages_limit_get(decay);
pac_decay_try_purge(tsdn, pac, decay, decay_stats, ecache,
npages_current, npages_limit);
}
return epoch_advanced;
}
bool
pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
decay_t *decay;
pac_decay_stats_t *decay_stats;
ecache_t *ecache;
pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
if (!decay_ms_valid(decay_ms)) {
return true;
}
malloc_mutex_lock(tsdn, &decay->mtx);
/*
* Restart decay backlog from scratch, which may cause many dirty pages
* to be immediately purged. It would conceptually be possible to map
* the old backlog onto the new backlog, but there is no justification
* for such complexity since decay_ms changes are intended to be
* infrequent, either between the {-1, 0, >0} states, or a one-time
* arbitrary change during initial arena configuration.
*/
nstime_t cur_time;
nstime_init_update(&cur_time);
decay_reinit(decay, &cur_time, decay_ms);
pac_maybe_decay_purge(tsdn, pac, decay, decay_stats, ecache, eagerness);
malloc_mutex_unlock(tsdn, &decay->mtx);
return false;
}
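/*
 * Usage sketch (illustrative only): the three decay_ms states discussed
 * above, applied to dirty pages.  example_set_dirty_decay is hypothetical;
 * tsdn and pac are assumed to come from the caller's context.
 */
static void
example_set_dirty_decay(tsdn_t *tsdn, pac_t *pac) {
	/* decay_ms > 0: purge unused dirty pages gradually over ~10 s. */
	pac_decay_ms_set(tsdn, pac, extent_state_dirty, 10 * 1000,
	    PAC_PURGE_ON_EPOCH_ADVANCE);
	/* decay_ms == 0: purge dirty pages immediately. */
	pac_decay_ms_set(tsdn, pac, extent_state_dirty, 0, PAC_PURGE_ALWAYS);
	/* decay_ms == -1: never purge. */
	pac_decay_ms_set(tsdn, pac, extent_state_dirty, -1, PAC_PURGE_ALWAYS);
}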
ssize_t
pac_decay_ms_get(pac_t *pac, extent_state_t state) {
decay_t *decay;
pac_decay_stats_t *decay_stats;
ecache_t *ecache;
pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
return decay_ms_read(decay);
}
void
pac_reset(tsdn_t *tsdn, pac_t *pac) {
/*
* No-op for now; purging is still done at the arena-level. It should
* get moved in here, though.
*/
(void)tsdn;
(void)pac;
}
void
pac_destroy(tsdn_t *tsdn, pac_t *pac) {
assert(ecache_npages_get(&pac->ecache_dirty) == 0);
assert(ecache_npages_get(&pac->ecache_muzzy) == 0);
/*
* Iterate over the retained extents and destroy them. This gives the
* extent allocator underlying the extent hooks an opportunity to unmap
* all retained memory without having to keep its own metadata
* structures. In practice, virtual memory for dss-allocated extents is
* leaked here, so best practice is to avoid dss for arenas to be
* destroyed, or provide custom extent hooks that track retained
* dss-based extents for later reuse.
*/
ehooks_t *ehooks = pac_ehooks_get(pac);
edata_t *edata;
while ((edata = ecache_evict(tsdn, pac, ehooks,
&pac->ecache_retained, 0)) != NULL) {
extent_destroy_wrapper(tsdn, pac, ehooks, edata);
}
}

824
BeefRT/JEMalloc/src/pages.c Normal file
View file

@ -0,0 +1,824 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
#ifdef __FreeBSD__
#include <vm/vm_param.h>
#endif
#endif
#ifdef __NetBSD__
#include <sys/bitops.h> /* ilog2 */
#endif
#ifdef JEMALLOC_HAVE_VM_MAKE_TAG
#define PAGES_FD_TAG VM_MAKE_TAG(101U)
#else
#define PAGES_FD_TAG -1
#endif
/******************************************************************************/
/* Data. */
/* Actual operating system page size, detected during bootstrap, <= PAGE. */
static size_t os_page;
#ifndef _WIN32
# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
# define PAGES_PROT_DECOMMIT (PROT_NONE)
static int mmap_flags;
#endif
static bool os_overcommits;
const char *thp_mode_names[] = {
"default",
"always",
"never",
"not supported"
};
thp_mode_t opt_thp = THP_MODE_DEFAULT;
thp_mode_t init_system_thp_mode;
/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
static bool pages_can_purge_lazy_runtime = true;
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
static int madvise_dont_need_zeros_is_faulty = -1;
/**
* Check that MADV_DONTNEED will actually zero pages on subsequent access.
*
* Since qemu does not yet support this [1], you can hit a very tricky
* assertion failure if you run a program that uses jemalloc under qemu:
*
* <jemalloc>: ../contrib/jemalloc/src/extent.c:1195: Failed assertion: "p[i] == 0"
*
* [1]: https://patchwork.kernel.org/patch/10576637/
*/
static int madvise_MADV_DONTNEED_zeroes_pages()
{
int works = -1;
size_t size = PAGE;
void * addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
malloc_write("<jemalloc>: Cannot allocate memory for "
"MADV_DONTNEED check\n");
if (opt_abort) {
abort();
}
}
memset(addr, 'A', size);
if (madvise(addr, size, MADV_DONTNEED) == 0) {
works = memchr(addr, 'A', size) == NULL;
} else {
/*
* If madvise() does not support MADV_DONTNEED, then we can
* call it anyway, and use its return code.
*/
works = 1;
}
if (munmap(addr, size) != 0) {
malloc_write("<jemalloc>: Cannot deallocate memory for "
"MADV_DONTNEED check\n");
if (opt_abort) {
abort();
}
}
return works;
}
#endif
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static void os_pages_unmap(void *addr, size_t size);
/******************************************************************************/
static void *
os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
assert(ALIGNMENT_CEILING(size, os_page) == size);
assert(size != 0);
if (os_overcommits) {
*commit = true;
}
void *ret;
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
{
#ifdef __NetBSD__
/*
* On NetBSD PAGE for a platform is defined to the
* maximum page size of all machine architectures
* for that platform, so that we can use the same
* binaries across all machine architectures.
*/
if (alignment > os_page || PAGE > os_page) {
unsigned int a = ilog2(MAX(alignment, PAGE));
mmap_flags |= MAP_ALIGNED(a);
}
#endif
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
ret = mmap(addr, size, prot, mmap_flags, PAGES_FD_TAG, 0);
}
assert(ret != NULL);
if (ret == MAP_FAILED) {
ret = NULL;
} else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
os_pages_unmap(ret, size);
ret = NULL;
}
#endif
assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
ret == addr));
return ret;
}
static void *
os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
bool *commit) {
void *ret = (void *)((uintptr_t)addr + leadsize);
assert(alloc_size >= leadsize + size);
#ifdef _WIN32
os_pages_unmap(addr, alloc_size);
void *new_addr = os_pages_map(ret, size, PAGE, commit);
if (new_addr == ret) {
return ret;
}
if (new_addr != NULL) {
os_pages_unmap(new_addr, size);
}
return NULL;
#else
size_t trailsize = alloc_size - leadsize - size;
if (leadsize != 0) {
os_pages_unmap(addr, leadsize);
}
if (trailsize != 0) {
os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
}
return ret;
#endif
}
static void
os_pages_unmap(void *addr, size_t size) {
assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
assert(ALIGNMENT_CEILING(size, os_page) == size);
#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
if (munmap(addr, size) == -1)
#endif
{
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
#else
"munmap"
#endif
"(): %s\n", buf);
if (opt_abort) {
abort();
}
}
}
static void *
pages_map_slow(size_t size, size_t alignment, bool *commit) {
size_t alloc_size = size + alignment - os_page;
/* Beware size_t wrap-around. */
if (alloc_size < size) {
return NULL;
}
void *ret;
do {
void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
if (pages == NULL) {
return NULL;
}
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
- (uintptr_t)pages;
ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
} while (ret == NULL);
assert(ret != NULL);
assert(PAGE_ADDR2BASE(ret) == ret);
return ret;
}
void *
pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
assert(alignment >= PAGE);
assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);
#if defined(__FreeBSD__) && defined(MAP_EXCL)
/*
* FreeBSD has mechanisms both to mmap at specific address without
* touching existing mappings, and to mmap with specific alignment.
*/
{
if (os_overcommits) {
*commit = true;
}
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
int flags = mmap_flags;
if (addr != NULL) {
flags |= MAP_FIXED | MAP_EXCL;
} else {
unsigned alignment_bits = ffs_zu(alignment);
assert(alignment_bits > 0);
flags |= MAP_ALIGNED(alignment_bits);
}
void *ret = mmap(addr, size, prot, flags, -1, 0);
if (ret == MAP_FAILED) {
ret = NULL;
}
return ret;
}
#endif
/*
* Ideally, there would be a way to specify alignment to mmap() (like
* NetBSD has), but in the absence of such a feature, we have to work
* hard to efficiently create aligned mappings. The reliable, but
* slow method is to create a mapping that is over-sized, then trim the
* excess. However, that always results in one or two calls to
* os_pages_unmap(), and it can leave holes in the process's virtual
* memory map if memory grows downward.
*
* Optimistically try mapping precisely the right amount before falling
* back to the slow method, with the expectation that the optimistic
* approach works most of the time.
*/
void *ret = os_pages_map(addr, size, os_page, commit);
if (ret == NULL || ret == addr) {
return ret;
}
assert(addr == NULL);
if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
os_pages_unmap(ret, size);
return pages_map_slow(size, alignment, commit);
}
assert(PAGE_ADDR2BASE(ret) == ret);
return ret;
}
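/*
 * A standalone sketch of the slow "over-allocate, then trim" approach
 * described above, using plain mmap/munmap.  It assumes alignment is a
 * power of two at least as large as the system page size; the optimistic
 * fast path and error reporting are omitted.  Not part of jemalloc.
 */
#include <stdint.h>
#include <sys/mman.h>

static void *
example_aligned_map(size_t size, size_t alignment, size_t page_size) {
	size_t alloc_size = size + alignment - page_size;
	if (alloc_size < size) {
		return NULL;	/* size_t overflow */
	}
	void *raw = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED) {
		return NULL;
	}
	uintptr_t base = (uintptr_t)raw;
	uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
	size_t lead = aligned - base;
	size_t trail = alloc_size - lead - size;
	if (lead != 0) {
		munmap(raw, lead);	/* drop the leading excess */
	}
	if (trail != 0) {
		munmap((void *)(aligned + size), trail);	/* drop the tail */
	}
	return (void *)aligned;
}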
void
pages_unmap(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
os_pages_unmap(addr, size);
}
static bool
os_pages_commit(void *addr, size_t size, bool commit) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#ifdef _WIN32
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
#else
{
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
PAGES_FD_TAG, 0);
if (result == MAP_FAILED) {
return true;
}
if (result != addr) {
/*
* We succeeded in mapping memory, but not in the right
* place.
*/
os_pages_unmap(result, size);
return true;
}
return false;
}
#endif
}
static bool
pages_commit_impl(void *addr, size_t size, bool commit) {
if (os_overcommits) {
return true;
}
return os_pages_commit(addr, size, commit);
}
bool
pages_commit(void *addr, size_t size) {
return pages_commit_impl(addr, size, true);
}
bool
pages_decommit(void *addr, size_t size) {
return pages_commit_impl(addr, size, false);
}
void
pages_mark_guards(void *head, void *tail) {
assert(head != NULL || tail != NULL);
assert(head == NULL || tail == NULL ||
(uintptr_t)head < (uintptr_t)tail);
#ifdef JEMALLOC_HAVE_MPROTECT
if (head != NULL) {
mprotect(head, PAGE, PROT_NONE);
}
if (tail != NULL) {
mprotect(tail, PAGE, PROT_NONE);
}
#else
/* Decommit sets to PROT_NONE / MEM_DECOMMIT. */
if (head != NULL) {
os_pages_commit(head, PAGE, false);
}
if (tail != NULL) {
os_pages_commit(tail, PAGE, false);
}
#endif
}
void
pages_unmark_guards(void *head, void *tail) {
assert(head != NULL || tail != NULL);
assert(head == NULL || tail == NULL ||
(uintptr_t)head < (uintptr_t)tail);
#ifdef JEMALLOC_HAVE_MPROTECT
bool head_and_tail = (head != NULL) && (tail != NULL);
size_t range = head_and_tail ?
(uintptr_t)tail - (uintptr_t)head + PAGE :
SIZE_T_MAX;
/*
* The amount of work that the kernel does in mprotect depends on the
* range argument. SC_LARGE_MINCLASS is an arbitrary threshold chosen
* to prevent the kernel from doing so much work that it would outweigh the
* savings of performing one less system call.
*/
bool ranged_mprotect = head_and_tail && range <= SC_LARGE_MINCLASS;
if (ranged_mprotect) {
mprotect(head, range, PROT_READ | PROT_WRITE);
} else {
if (head != NULL) {
mprotect(head, PAGE, PROT_READ | PROT_WRITE);
}
if (tail != NULL) {
mprotect(tail, PAGE, PROT_READ | PROT_WRITE);
}
}
#else
if (head != NULL) {
os_pages_commit(head, PAGE, true);
}
if (tail != NULL) {
os_pages_commit(tail, PAGE, true);
}
#endif
}
bool
pages_purge_lazy(void *addr, size_t size) {
assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
assert(PAGE_CEILING(size) == size);
if (!pages_can_purge_lazy) {
return true;
}
if (!pages_can_purge_lazy_runtime) {
/*
* Built with lazy purge enabled, but detected it was not
* supported on the current system.
*/
return true;
}
#ifdef _WIN32
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
return false;
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
return (madvise(addr, size,
# ifdef MADV_FREE
MADV_FREE
# else
JEMALLOC_MADV_FREE
# endif
) != 0);
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
!defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
return (madvise(addr, size, MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
!defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
return (posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#else
not_reached();
#endif
}
bool
pages_purge_forced(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
if (!pages_can_purge_forced) {
return true;
}
#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
return (unlikely(madvise_dont_need_zeros_is_faulty) ||
madvise(addr, size, MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
return (unlikely(madvise_dont_need_zeros_is_faulty) ||
posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_MAPS_COALESCE)
/* Try to overlay a new demand-zeroed mapping. */
return pages_commit(addr, size);
#else
not_reached();
#endif
}
static bool
pages_huge_impl(void *addr, size_t size, bool aligned) {
if (aligned) {
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
assert(HUGEPAGE_CEILING(size) == size);
}
#if defined(JEMALLOC_HAVE_MADVISE_HUGE)
return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#elif defined(JEMALLOC_HAVE_MEMCNTL)
struct memcntl_mha m = {0};
m.mha_cmd = MHA_MAPSIZE_VA;
m.mha_pagesize = HUGEPAGE;
return (memcntl(addr, size, MC_HAT_ADVISE, (caddr_t)&m, 0, 0) == 0);
#else
return true;
#endif
}
bool
pages_huge(void *addr, size_t size) {
return pages_huge_impl(addr, size, true);
}
static bool
pages_huge_unaligned(void *addr, size_t size) {
return pages_huge_impl(addr, size, false);
}
static bool
pages_nohuge_impl(void *addr, size_t size, bool aligned) {
if (aligned) {
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
assert(HUGEPAGE_CEILING(size) == size);
}
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
return false;
#endif
}
bool
pages_nohuge(void *addr, size_t size) {
return pages_nohuge_impl(addr, size, true);
}
static bool
pages_nohuge_unaligned(void *addr, size_t size) {
return pages_nohuge_impl(addr, size, false);
}
bool
pages_dontdump(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#if defined(JEMALLOC_MADVISE_DONTDUMP)
return madvise(addr, size, MADV_DONTDUMP) != 0;
#elif defined(JEMALLOC_MADVISE_NOCORE)
return madvise(addr, size, MADV_NOCORE) != 0;
#else
return false;
#endif
}
bool
pages_dodump(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#if defined(JEMALLOC_MADVISE_DONTDUMP)
return madvise(addr, size, MADV_DODUMP) != 0;
#elif defined(JEMALLOC_MADVISE_NOCORE)
return madvise(addr, size, MADV_CORE) != 0;
#else
return false;
#endif
}
static size_t
os_page_detect(void) {
#ifdef _WIN32
SYSTEM_INFO si;
GetSystemInfo(&si);
return si.dwPageSize;
#elif defined(__FreeBSD__)
/*
* This returns the value obtained from
* the auxv vector, avoiding a syscall.
*/
return getpagesize();
#else
long result = sysconf(_SC_PAGESIZE);
if (result == -1) {
return LG_PAGE;
}
return (size_t)result;
#endif
}
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
os_overcommits_sysctl(void) {
int vm_overcommit;
size_t sz;
sz = sizeof(vm_overcommit);
#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT)
int mib[2];
mib[0] = CTL_VM;
mib[1] = VM_OVERCOMMIT;
if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) {
return false; /* Error. */
}
#else
if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
return false; /* Error. */
}
#endif
return ((vm_overcommit & 0x3) == 0);
}
#endif
#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/*
* Use syscall(2) rather than {open,read,close}(2) when possible to avoid
* reentry during bootstrapping if another library has interposed system call
* wrappers.
*/
static bool
os_overcommits_proc(void) {
int fd;
char buf[1];
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
#if defined(O_CLOEXEC)
fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
O_CLOEXEC);
#else
fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
if (fd != -1) {
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
}
#endif
#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
#if defined(O_CLOEXEC)
fd = (int)syscall(SYS_openat,
AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#else
fd = (int)syscall(SYS_openat,
AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY);
if (fd != -1) {
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
}
#endif
#else
#if defined(O_CLOEXEC)
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#else
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
if (fd != -1) {
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
}
#endif
#endif
if (fd == -1) {
return false; /* Error. */
}
ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
syscall(SYS_close, fd);
#else
close(fd);
#endif
if (nread < 1) {
return false; /* Error. */
}
/*
* /proc/sys/vm/overcommit_memory meanings:
* 0: Heuristic overcommit.
* 1: Always overcommit.
* 2: Never overcommit.
*/
return (buf[0] == '0' || buf[0] == '1');
}
#endif
void
pages_set_thp_state(void *ptr, size_t size) {
if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) {
return;
}
assert(opt_thp != thp_mode_not_supported &&
init_system_thp_mode != thp_mode_not_supported);
if (opt_thp == thp_mode_always
&& init_system_thp_mode != thp_mode_never) {
assert(init_system_thp_mode == thp_mode_default);
pages_huge_unaligned(ptr, size);
} else if (opt_thp == thp_mode_never) {
assert(init_system_thp_mode == thp_mode_default ||
init_system_thp_mode == thp_mode_always);
pages_nohuge_unaligned(ptr, size);
}
}
static void
init_thp_state(void) {
if (!have_madvise_huge && !have_memcntl) {
if (metadata_thp_enabled() && opt_abort) {
malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
abort();
}
goto label_error;
}
#if defined(JEMALLOC_HAVE_MADVISE_HUGE)
static const char sys_state_madvise[] = "always [madvise] never\n";
static const char sys_state_always[] = "[always] madvise never\n";
static const char sys_state_never[] = "always madvise [never]\n";
char buf[sizeof(sys_state_madvise)];
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
int fd = (int)syscall(SYS_open,
"/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
int fd = (int)syscall(SYS_openat,
AT_FDCWD, "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
if (fd == -1) {
goto label_error;
}
ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
syscall(SYS_close, fd);
#else
close(fd);
#endif
if (nread < 0) {
goto label_error;
}
if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
init_system_thp_mode = thp_mode_default;
} else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) {
init_system_thp_mode = thp_mode_always;
} else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) {
init_system_thp_mode = thp_mode_never;
} else {
goto label_error;
}
return;
#elif defined(JEMALLOC_HAVE_MEMCNTL)
init_system_thp_mode = thp_mode_default;
return;
#endif
label_error:
opt_thp = init_system_thp_mode = thp_mode_not_supported;
}
bool
pages_boot(void) {
os_page = os_page_detect();
if (os_page > PAGE) {
malloc_write("<jemalloc>: Unsupported system page size\n");
if (opt_abort) {
abort();
}
return true;
}
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
if (!opt_trust_madvise) {
madvise_dont_need_zeros_is_faulty = !madvise_MADV_DONTNEED_zeroes_pages();
if (madvise_dont_need_zeros_is_faulty) {
malloc_write("<jemalloc>: MADV_DONTNEED does not work (memset will be used instead)\n");
malloc_write("<jemalloc>: (This is the expected behaviour if you are running under QEMU)\n");
}
} else {
		/*
		 * opt_trust_madvise is enabled, so skip the runtime check
		 * and trust that MADV_DONTNEED zeroes pages.
		 */
madvise_dont_need_zeros_is_faulty = 0;
}
#endif
#ifndef _WIN32
mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
os_overcommits = os_overcommits_proc();
# ifdef MAP_NORESERVE
if (os_overcommits) {
mmap_flags |= MAP_NORESERVE;
}
# endif
#elif defined(__NetBSD__)
os_overcommits = true;
#else
os_overcommits = false;
#endif
init_thp_state();
#ifdef __FreeBSD__
/*
* FreeBSD doesn't need the check; madvise(2) is known to work.
*/
#else
/* Detect lazy purge runtime support. */
if (pages_can_purge_lazy) {
bool committed = false;
void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed);
if (madv_free_page == NULL) {
return true;
}
assert(pages_can_purge_lazy_runtime);
if (pages_purge_lazy(madv_free_page, PAGE)) {
pages_can_purge_lazy_runtime = false;
}
os_pages_unmap(madv_free_page, PAGE);
}
#endif
return false;
}
31
BeefRT/JEMalloc/src/pai.c Normal file
@ -0,0 +1,31 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
size_t
pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
edata_list_active_t *results, bool *deferred_work_generated) {
for (size_t i = 0; i < nallocs; i++) {
bool deferred_by_alloc = false;
edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
/* zero */ false, /* guarded */ false,
/* frequent_reuse */ false, &deferred_by_alloc);
*deferred_work_generated |= deferred_by_alloc;
if (edata == NULL) {
return i;
}
edata_list_active_append(results, edata);
}
return nallocs;
}
void
pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
edata_list_active_t *list, bool *deferred_work_generated) {
edata_t *edata;
while ((edata = edata_list_active_first(list)) != NULL) {
bool deferred_by_dalloc = false;
edata_list_active_remove(list, edata);
pai_dalloc(tsdn, self, edata, &deferred_by_dalloc);
*deferred_work_generated |= deferred_by_dalloc;
}
}
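/*
 * Illustrative sketch, not part of jemalloc: driving the default batch
 * hooks above.  "my_pai" stands for any pai_t with a working alloc hook;
 * edata_list_active_init() is assumed to be the list initializer from
 * edata.h.
 */
static void
example_pai_batch(tsdn_t *tsdn, pai_t *my_pai) {
	edata_list_active_t results;
	edata_list_active_init(&results);
	bool deferred = false;
	/* Ask for 8 PAGE-sized extents; "got" may be smaller on OOM. */
	size_t got = pai_alloc_batch_default(tsdn, my_pai, PAGE, 8, &results,
	    &deferred);
	if (got < 8) {
		/* Only "got" edatas were appended to the results list. */
	}
	/* Return everything through the matching batch deallocator. */
	pai_dalloc_batch_default(tsdn, my_pai, &results, &deferred);
}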
@ -0,0 +1,82 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/peak_event.h"
#include "jemalloc/internal/activity_callback.h"
#include "jemalloc/internal/peak.h"
/*
* Update every 64K by default. We're not exposing this as a configuration
* option for now; we don't want to bind ourselves too tightly to any particular
* performance requirements for small values, or guarantee that we'll even be
* able to provide fine-grained accuracy.
*/
#define PEAK_EVENT_WAIT (64 * 1024)
/* Update the peak with current tsd state. */
void
peak_event_update(tsd_t *tsd) {
uint64_t alloc = tsd_thread_allocated_get(tsd);
uint64_t dalloc = tsd_thread_deallocated_get(tsd);
peak_t *peak = tsd_peakp_get(tsd);
peak_update(peak, alloc, dalloc);
}
static void
peak_event_activity_callback(tsd_t *tsd) {
activity_callback_thunk_t *thunk = tsd_activity_callback_thunkp_get(
tsd);
uint64_t alloc = tsd_thread_allocated_get(tsd);
uint64_t dalloc = tsd_thread_deallocated_get(tsd);
if (thunk->callback != NULL) {
thunk->callback(thunk->uctx, alloc, dalloc);
}
}
/* Set current state to zero. */
void
peak_event_zero(tsd_t *tsd) {
uint64_t alloc = tsd_thread_allocated_get(tsd);
uint64_t dalloc = tsd_thread_deallocated_get(tsd);
peak_t *peak = tsd_peakp_get(tsd);
peak_set_zero(peak, alloc, dalloc);
}
uint64_t
peak_event_max(tsd_t *tsd) {
peak_t *peak = tsd_peakp_get(tsd);
return peak_max(peak);
}
uint64_t
peak_alloc_new_event_wait(tsd_t *tsd) {
return PEAK_EVENT_WAIT;
}
uint64_t
peak_alloc_postponed_event_wait(tsd_t *tsd) {
return TE_MIN_START_WAIT;
}
void
peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
peak_event_update(tsd);
peak_event_activity_callback(tsd);
}
uint64_t
peak_dalloc_new_event_wait(tsd_t *tsd) {
return PEAK_EVENT_WAIT;
}
uint64_t
peak_dalloc_postponed_event_wait(tsd_t *tsd) {
return TE_MIN_START_WAIT;
}
void
peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
peak_event_update(tsd);
peak_event_activity_callback(tsd);
}
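/*
 * Illustrative note (assuming the usual high-water-mark semantics of
 * peak_update() in peak.h): the handlers above refresh the maximum
 * observed value of (thread allocated - thread deallocated), and the
 * event machinery invokes them at most once per PEAK_EVENT_WAIT bytes of
 * allocation or deallocation activity on the thread.
 */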
789
BeefRT/JEMalloc/src/prof.c Normal file
@ -0,0 +1,789 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/counter.h"
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_log.h"
#include "jemalloc/internal/prof_recent.h"
#include "jemalloc/internal/prof_stats.h"
#include "jemalloc/internal/prof_sys.h"
#include "jemalloc/internal/prof_hook.h"
#include "jemalloc/internal/thread_event.h"
/*
* This file implements the profiling "APIs" needed by other parts of jemalloc,
* and also manages the relevant "operational" data, mainly options and mutexes;
* the core profiling data structures are encapsulated in prof_data.c.
*/
/******************************************************************************/
/* Data. */
bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_leak_error = false;
bool opt_prof_accum = false;
char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
bool opt_prof_sys_thread_name = false;
bool opt_prof_unbias = true;
/* Accessed via prof_sample_event_handler(). */
static counter_accum_t prof_idump_accumulated;
/*
* Initialized as opt_prof_active, and accessed via
* prof_active_[gs]et{_unlocked,}().
*/
bool prof_active_state;
static malloc_mutex_t prof_active_mtx;
/*
* Initialized as opt_prof_thread_active_init, and accessed via
* prof_thread_active_init_[gs]et().
*/
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;
/*
* Initialized as opt_prof_gdump, and accessed via
* prof_gdump_[gs]et{_unlocked,}().
*/
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;
uint64_t prof_interval = 0;
size_t lg_prof_sample;
static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;
/* Do not dump any profiles until bootstrapping is complete. */
bool prof_booted = false;
/* Logically a prof_backtrace_hook_t. */
atomic_p_t prof_backtrace_hook;
/* Logically a prof_dump_hook_t. */
atomic_p_t prof_dump_hook;
/******************************************************************************/
void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx) {
cassert(config_prof);
if (tsd_reentrancy_level_get(tsd) > 0) {
assert((uintptr_t)tctx == (uintptr_t)1U);
return;
}
if ((uintptr_t)tctx > (uintptr_t)1U) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
tctx->prepared = false;
prof_tctx_try_destroy(tsd, tctx);
}
}
void
prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
size_t usize, prof_tctx_t *tctx) {
cassert(config_prof);
if (opt_prof_sys_thread_name) {
prof_sys_thread_name_fetch(tsd);
}
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
ptr);
prof_info_set(tsd, edata, tctx, size);
szind_t szind = sz_size2index(usize);
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
/*
* We need to do these map lookups while holding the lock, to avoid the
* possibility of races with prof_reset calls, which update the map and
* then acquire the lock. This actually still leaves a data race on the
* contents of the unbias map, but we have not yet gone through and
* atomic-ified the prof module, and compilers are not yet causing us
* issues. The key thing is to make sure that, if we read garbage data,
* the prof_reset call is about to mark our tctx as expired before any
* dumping of our corrupted output is attempted.
*/
size_t shifted_unbiased_cnt = prof_shifted_unbiased_cnt[szind];
size_t unbiased_bytes = prof_unbiased_sz[szind];
tctx->cnts.curobjs++;
tctx->cnts.curobjs_shifted_unbiased += shifted_unbiased_cnt;
tctx->cnts.curbytes += usize;
tctx->cnts.curbytes_unbiased += unbiased_bytes;
if (opt_prof_accum) {
tctx->cnts.accumobjs++;
tctx->cnts.accumobjs_shifted_unbiased += shifted_unbiased_cnt;
tctx->cnts.accumbytes += usize;
tctx->cnts.accumbytes_unbiased += unbiased_bytes;
}
bool record_recent = prof_recent_alloc_prepare(tsd, tctx);
tctx->prepared = false;
malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
if (record_recent) {
assert(tctx == edata_prof_tctx_get(edata));
prof_recent_alloc(tsd, edata, size, usize);
}
if (opt_prof_stats) {
prof_stats_inc(tsd, szind, size);
}
}
void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
cassert(config_prof);
assert(prof_info != NULL);
prof_tctx_t *tctx = prof_info->alloc_tctx;
assert((uintptr_t)tctx > (uintptr_t)1U);
szind_t szind = sz_size2index(usize);
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize);
/*
* It's not correct to do equivalent asserts for unbiased bytes, because
* of the potential for races with prof.reset calls. The map contents
* should really be atomic, but we have not atomic-ified the prof module
* yet.
*/
tctx->cnts.curobjs--;
tctx->cnts.curobjs_shifted_unbiased -= prof_shifted_unbiased_cnt[szind];
tctx->cnts.curbytes -= usize;
tctx->cnts.curbytes_unbiased -= prof_unbiased_sz[szind];
prof_try_log(tsd, usize, prof_info);
prof_tctx_try_destroy(tsd, tctx);
if (opt_prof_stats) {
prof_stats_dec(tsd, szind, prof_info->alloc_size);
}
}
prof_tctx_t *
prof_tctx_create(tsd_t *tsd) {
if (!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0) {
return NULL;
}
prof_tdata_t *tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return NULL;
}
prof_bt_t bt;
bt_init(&bt, tdata->vec);
prof_backtrace(tsd, &bt);
return prof_lookup(tsd, &bt);
}
/*
* The bodies of this function and prof_leakcheck() are compiled out unless heap
* profiling is enabled, so that it is possible to compile jemalloc with
* floating point support completely disabled. Avoiding floating point code is
* important on memory-constrained systems, but it also enables a workaround for
* versions of glibc that don't properly save/restore floating point registers
* during dynamic lazy symbol loading (which internally calls into whatever
* malloc implementation happens to be integrated into the application). Note
* that some compilers (e.g. gcc 4.8) may use floating point registers for fast
* memory moves, so jemalloc must be compiled with such optimizations disabled
 * (e.g. -mno-sse) in order for the workaround to be complete.
*/
uint64_t
prof_sample_new_event_wait(tsd_t *tsd) {
#ifdef JEMALLOC_PROF
if (lg_prof_sample == 0) {
return TE_MIN_START_WAIT;
}
/*
* Compute sample interval as a geometrically distributed random
* variable with mean (2^lg_prof_sample).
*
 *   bytes_until_sample = ceil(log(u) / log(1 - p)), where
 *   p = 1 / 2^lg_prof_sample
*
* For more information on the math, see:
*
* Non-Uniform Random Variate Generation
* Luc Devroye
* Springer-Verlag, New York, 1986
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
*
* In the actual computation, there's a non-zero probability that our
* pseudo random number generator generates an exact 0, and to avoid
* log(0), we set u to 1.0 in case r is 0. Therefore u effectively is
* uniformly distributed in (0, 1] instead of [0, 1). Further, rather
* than taking the ceiling, we take the floor and then add 1, since
* otherwise bytes_until_sample would be 0 if u is exactly 1.0.
*/
uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
double u = (r == 0U) ? 1.0 : (double)r * (1.0/9007199254740992.0L);
return (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ (uint64_t)1U;
#else
not_reached();
return TE_MAX_START_WAIT;
#endif
}
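/*
 * Worked example (illustrative): with lg_prof_sample = 19, the documented
 * default, p = 2^-19 and the draw above is geometric with mean 1/p = 2^19,
 * i.e. roughly one sampled allocation per 512 KiB of allocated bytes.
 */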
uint64_t
prof_sample_postponed_event_wait(tsd_t *tsd) {
/*
* The postponed wait time for prof sample event is computed as if we
* want a new wait time (i.e. as if the event were triggered). If we
* instead postpone to the immediate next allocation, like how we're
* handling the other events, then we can have sampling bias, if e.g.
* the allocation immediately following a reentrancy always comes from
* the same stack trace.
*/
return prof_sample_new_event_wait(tsd);
}
void
prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed) {
cassert(config_prof);
assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
if (prof_interval == 0 || !prof_active_get_unlocked()) {
return;
}
if (counter_accum(tsd_tsdn(tsd), &prof_idump_accumulated, elapsed)) {
prof_idump(tsd_tsdn(tsd));
}
}
static void
prof_fdump(void) {
tsd_t *tsd;
cassert(config_prof);
assert(opt_prof_final);
if (!prof_booted) {
return;
}
tsd = tsd_fetch();
assert(tsd_reentrancy_level_get(tsd) == 0);
prof_fdump_impl(tsd);
}
static bool
prof_idump_accum_init(void) {
cassert(config_prof);
return counter_accum_init(&prof_idump_accumulated, prof_interval);
}
void
prof_idump(tsdn_t *tsdn) {
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
return;
}
tsd = tsdn_tsd(tsdn);
if (tsd_reentrancy_level_get(tsd) > 0) {
return;
}
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return;
}
if (tdata->enq) {
tdata->enq_idump = true;
return;
}
prof_idump_impl(tsd);
}
bool
prof_mdump(tsd_t *tsd, const char *filename) {
cassert(config_prof);
assert(tsd_reentrancy_level_get(tsd) == 0);
if (!opt_prof || !prof_booted) {
return true;
}
return prof_mdump_impl(tsd, filename);
}
void
prof_gdump(tsdn_t *tsdn) {
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
return;
}
tsd = tsdn_tsd(tsdn);
if (tsd_reentrancy_level_get(tsd) > 0) {
return;
}
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL) {
return;
}
if (tdata->enq) {
tdata->enq_gdump = true;
return;
}
prof_gdump_impl(tsd);
}
static uint64_t
prof_thr_uid_alloc(tsdn_t *tsdn) {
uint64_t thr_uid;
malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
thr_uid = next_thr_uid;
next_thr_uid++;
malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
return thr_uid;
}
prof_tdata_t *
prof_tdata_init(tsd_t *tsd) {
return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
}
prof_tdata_t *
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ?
prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
bool active = tdata->active;
prof_tdata_detach(tsd, tdata);
return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
active);
}
void
prof_tdata_cleanup(tsd_t *tsd) {
prof_tdata_t *tdata;
if (!config_prof) {
return;
}
tdata = tsd_prof_tdata_get(tsd);
if (tdata != NULL) {
prof_tdata_detach(tsd, tdata);
}
}
bool
prof_active_get(tsdn_t *tsdn) {
bool prof_active_current;
prof_active_assert();
malloc_mutex_lock(tsdn, &prof_active_mtx);
prof_active_current = prof_active_state;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
return prof_active_current;
}
bool
prof_active_set(tsdn_t *tsdn, bool active) {
bool prof_active_old;
prof_active_assert();
malloc_mutex_lock(tsdn, &prof_active_mtx);
prof_active_old = prof_active_state;
prof_active_state = active;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
prof_active_assert();
return prof_active_old;
}
const char *
prof_thread_name_get(tsd_t *tsd) {
assert(tsd_reentrancy_level_get(tsd) == 0);
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return "";
}
return (tdata->thread_name != NULL ? tdata->thread_name : "");
}
int
prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
if (opt_prof_sys_thread_name) {
return ENOENT;
} else {
return prof_thread_name_set_impl(tsd, thread_name);
}
}
bool
prof_thread_active_get(tsd_t *tsd) {
assert(tsd_reentrancy_level_get(tsd) == 0);
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return false;
}
return tdata->active;
}
bool
prof_thread_active_set(tsd_t *tsd, bool active) {
assert(tsd_reentrancy_level_get(tsd) == 0);
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return true;
}
tdata->active = active;
return false;
}
bool
prof_thread_active_init_get(tsdn_t *tsdn) {
bool active_init;
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
active_init = prof_thread_active_init;
malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
return active_init;
}
bool
prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
bool active_init_old;
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
active_init_old = prof_thread_active_init;
prof_thread_active_init = active_init;
malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
return active_init_old;
}
bool
prof_gdump_get(tsdn_t *tsdn) {
bool prof_gdump_current;
malloc_mutex_lock(tsdn, &prof_gdump_mtx);
prof_gdump_current = prof_gdump_val;
malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
return prof_gdump_current;
}
bool
prof_gdump_set(tsdn_t *tsdn, bool gdump) {
bool prof_gdump_old;
malloc_mutex_lock(tsdn, &prof_gdump_mtx);
prof_gdump_old = prof_gdump_val;
prof_gdump_val = gdump;
malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
return prof_gdump_old;
}
void
prof_backtrace_hook_set(prof_backtrace_hook_t hook) {
atomic_store_p(&prof_backtrace_hook, hook, ATOMIC_RELEASE);
}
prof_backtrace_hook_t
prof_backtrace_hook_get() {
return (prof_backtrace_hook_t)atomic_load_p(&prof_backtrace_hook,
ATOMIC_ACQUIRE);
}
void
prof_dump_hook_set(prof_dump_hook_t hook) {
atomic_store_p(&prof_dump_hook, hook, ATOMIC_RELEASE);
}
prof_dump_hook_t
prof_dump_hook_get() {
return (prof_dump_hook_t)atomic_load_p(&prof_dump_hook,
ATOMIC_ACQUIRE);
}
void
prof_boot0(void) {
cassert(config_prof);
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
sizeof(PROF_PREFIX_DEFAULT));
}
void
prof_boot1(void) {
cassert(config_prof);
/*
* opt_prof must be in its final state before any arenas are
* initialized, so this function must be executed early.
*/
if (opt_prof_leak_error && !opt_prof_leak) {
opt_prof_leak = true;
}
if (opt_prof_leak && !opt_prof) {
/*
* Enable opt_prof, but in such a way that profiles are never
* automatically dumped.
*/
opt_prof = true;
opt_prof_gdump = false;
} else if (opt_prof) {
if (opt_lg_prof_interval >= 0) {
prof_interval = (((uint64_t)1U) <<
opt_lg_prof_interval);
}
}
}
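/*
 * Worked example (illustrative): opt_lg_prof_interval = 30 sets
 * prof_interval to 2^30, so an interval dump is triggered roughly once
 * per GiB of cumulative allocation activity.
 */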
bool
prof_boot2(tsd_t *tsd, base_t *base) {
cassert(config_prof);
/*
* Initialize the global mutexes unconditionally to maintain correct
* stats when opt_prof is false.
*/
if (malloc_mutex_init(&prof_active_mtx, "prof_active",
WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_thread_active_init_mtx,
"prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_stats_mtx, "prof_stats",
WITNESS_RANK_PROF_STATS, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_dump_filename_mtx,
"prof_dump_filename", WITNESS_RANK_PROF_DUMP_FILENAME,
malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
return true;
}
if (opt_prof) {
lg_prof_sample = opt_lg_prof_sample;
prof_unbias_map_init();
prof_active_state = opt_prof_active;
prof_gdump_val = opt_prof_gdump;
prof_thread_active_init = opt_prof_thread_active_init;
if (prof_data_init(tsd)) {
return true;
}
next_thr_uid = 0;
if (prof_idump_accum_init()) {
return true;
}
if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
if (opt_abort) {
abort();
}
}
if (prof_log_init(tsd)) {
return true;
}
if (prof_recent_init()) {
return true;
}
prof_base = base;
gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), base,
PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), CACHELINE);
if (gctx_locks == NULL) {
return true;
}
for (unsigned i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
WITNESS_RANK_PROF_GCTX,
malloc_mutex_rank_exclusive)) {
return true;
}
}
tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), base,
PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), CACHELINE);
if (tdata_locks == NULL) {
return true;
}
for (unsigned i = 0; i < PROF_NTDATA_LOCKS; i++) {
if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
WITNESS_RANK_PROF_TDATA,
malloc_mutex_rank_exclusive)) {
return true;
}
}
prof_unwind_init();
prof_hooks_init();
}
prof_booted = true;
return false;
}
void
prof_prefork0(tsdn_t *tsdn) {
if (config_prof && opt_prof) {
unsigned i;
malloc_mutex_prefork(tsdn, &prof_dump_mtx);
malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
malloc_mutex_prefork(tsdn, &tdatas_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &tdata_locks[i]);
}
malloc_mutex_prefork(tsdn, &log_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &gctx_locks[i]);
}
malloc_mutex_prefork(tsdn, &prof_recent_dump_mtx);
}
}
void
prof_prefork1(tsdn_t *tsdn) {
if (config_prof && opt_prof) {
counter_prefork(tsdn, &prof_idump_accumulated);
malloc_mutex_prefork(tsdn, &prof_active_mtx);
malloc_mutex_prefork(tsdn, &prof_dump_filename_mtx);
malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
malloc_mutex_prefork(tsdn, &prof_recent_alloc_mtx);
malloc_mutex_prefork(tsdn, &prof_stats_mtx);
malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
}
}
void
prof_postfork_parent(tsdn_t *tsdn) {
if (config_prof && opt_prof) {
unsigned i;
malloc_mutex_postfork_parent(tsdn,
&prof_thread_active_init_mtx);
malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_stats_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_recent_alloc_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_dump_filename_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
counter_postfork_parent(tsdn, &prof_idump_accumulated);
malloc_mutex_postfork_parent(tsdn, &prof_recent_dump_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
}
malloc_mutex_postfork_parent(tsdn, &log_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
}
malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
}
}
void
prof_postfork_child(tsdn_t *tsdn) {
if (config_prof && opt_prof) {
unsigned i;
malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
malloc_mutex_postfork_child(tsdn, &prof_stats_mtx);
malloc_mutex_postfork_child(tsdn, &prof_recent_alloc_mtx);
malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
malloc_mutex_postfork_child(tsdn, &prof_dump_filename_mtx);
malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
counter_postfork_child(tsdn, &prof_idump_accumulated);
malloc_mutex_postfork_child(tsdn, &prof_recent_dump_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
}
malloc_mutex_postfork_child(tsdn, &log_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
}
malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
}
}
/******************************************************************************/
File diff suppressed because it is too large Load diff
@ -0,0 +1,717 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/emitter.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_log.h"
#include "jemalloc/internal/prof_sys.h"
bool opt_prof_log = false;
typedef enum prof_logging_state_e prof_logging_state_t;
enum prof_logging_state_e {
prof_logging_state_stopped,
prof_logging_state_started,
prof_logging_state_dumping
};
/*
* - stopped: log_start never called, or previous log_stop has completed.
* - started: log_start called, log_stop not called yet. Allocations are logged.
* - dumping: log_stop called but not finished; samples are not logged anymore.
*/
prof_logging_state_t prof_logging_state = prof_logging_state_stopped;
/* Used in unit tests. */
static bool prof_log_dummy = false;
/* Incremented for every log file that is output. */
static uint64_t log_seq = 0;
static char log_filename[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
1];
/* Timestamp for most recent call to log_start(). */
static nstime_t log_start_timestamp;
/* Increment these when adding to the log_bt and log_thr linked lists. */
static size_t log_bt_index = 0;
static size_t log_thr_index = 0;
/* Linked list node definitions. These are only used in this file. */
typedef struct prof_bt_node_s prof_bt_node_t;
struct prof_bt_node_s {
prof_bt_node_t *next;
size_t index;
prof_bt_t bt;
/* Variable size backtrace vector pointed to by bt. */
void *vec[1];
};
typedef struct prof_thr_node_s prof_thr_node_t;
struct prof_thr_node_s {
prof_thr_node_t *next;
size_t index;
uint64_t thr_uid;
/* Variable size based on thr_name_sz. */
char name[1];
};
typedef struct prof_alloc_node_s prof_alloc_node_t;
/* This is output when logging sampled allocations. */
struct prof_alloc_node_s {
prof_alloc_node_t *next;
/* Indices into an array of thread data. */
size_t alloc_thr_ind;
size_t free_thr_ind;
/* Indices into an array of backtraces. */
size_t alloc_bt_ind;
size_t free_bt_ind;
uint64_t alloc_time_ns;
uint64_t free_time_ns;
size_t usize;
};
/*
* Created on the first call to prof_try_log and deleted on prof_log_stop.
* These are the backtraces and threads that have already been logged by an
* allocation.
*/
static bool log_tables_initialized = false;
static ckh_t log_bt_node_set;
static ckh_t log_thr_node_set;
/* Store linked lists for logged data. */
static prof_bt_node_t *log_bt_first = NULL;
static prof_bt_node_t *log_bt_last = NULL;
static prof_thr_node_t *log_thr_first = NULL;
static prof_thr_node_t *log_thr_last = NULL;
static prof_alloc_node_t *log_alloc_first = NULL;
static prof_alloc_node_t *log_alloc_last = NULL;
/* Protects the prof_logging_state and any log_{...} variable. */
malloc_mutex_t log_mtx;
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
static bool prof_thr_node_keycomp(const void *k1, const void *k2);
static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
static bool prof_bt_node_keycomp(const void *k1, const void *k2);
/******************************************************************************/
static size_t
prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
assert(prof_logging_state == prof_logging_state_started);
malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
prof_bt_node_t dummy_node;
dummy_node.bt = *bt;
prof_bt_node_t *node;
/* See if this backtrace is already cached in the table. */
if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
(void **)(&node), NULL)) {
size_t sz = offsetof(prof_bt_node_t, vec) +
(bt->len * sizeof(void *));
prof_bt_node_t *new_node = (prof_bt_node_t *)
iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
true, arena_get(TSDN_NULL, 0, true), true);
if (log_bt_first == NULL) {
log_bt_first = new_node;
log_bt_last = new_node;
} else {
log_bt_last->next = new_node;
log_bt_last = new_node;
}
new_node->next = NULL;
new_node->index = log_bt_index;
/*
* Copy the backtrace: bt is inside a tdata or gctx, which
* might die before prof_log_stop is called.
*/
new_node->bt.len = bt->len;
memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
new_node->bt.vec = new_node->vec;
log_bt_index++;
ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
return new_node->index;
} else {
return node->index;
}
}
static size_t
prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
assert(prof_logging_state == prof_logging_state_started);
malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
prof_thr_node_t dummy_node;
dummy_node.thr_uid = thr_uid;
prof_thr_node_t *node;
/* See if this thread is already cached in the table. */
if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
(void **)(&node), NULL)) {
size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
prof_thr_node_t *new_node = (prof_thr_node_t *)
iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
true, arena_get(TSDN_NULL, 0, true), true);
if (log_thr_first == NULL) {
log_thr_first = new_node;
log_thr_last = new_node;
} else {
log_thr_last->next = new_node;
log_thr_last = new_node;
}
new_node->next = NULL;
new_node->index = log_thr_index;
new_node->thr_uid = thr_uid;
strcpy(new_node->name, name);
log_thr_index++;
ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
return new_node->index;
} else {
return node->index;
}
}
JEMALLOC_COLD
void
prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
cassert(config_prof);
prof_tctx_t *tctx = prof_info->alloc_tctx;
malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
if (cons_tdata == NULL) {
/*
* We decide not to log these allocations. cons_tdata will be
* NULL only when the current thread is in a weird state (e.g.
* it's being destroyed).
*/
return;
}
malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);
if (prof_logging_state != prof_logging_state_started) {
goto label_done;
}
if (!log_tables_initialized) {
bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
prof_bt_node_hash, prof_bt_node_keycomp);
bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
prof_thr_node_hash, prof_thr_node_keycomp);
if (err1 || err2) {
goto label_done;
}
log_tables_initialized = true;
}
nstime_t alloc_time = prof_info->alloc_time;
nstime_t free_time;
nstime_prof_init_update(&free_time);
size_t sz = sizeof(prof_alloc_node_t);
prof_alloc_node_t *new_node = (prof_alloc_node_t *)
iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
const char *prod_thr_name = (tctx->tdata->thread_name == NULL)?
"" : tctx->tdata->thread_name;
const char *cons_thr_name = prof_thread_name_get(tsd);
prof_bt_t bt;
/* Initialize the backtrace, using the buffer in tdata to store it. */
bt_init(&bt, cons_tdata->vec);
prof_backtrace(tsd, &bt);
prof_bt_t *cons_bt = &bt;
/* We haven't destroyed tctx yet, so gctx should be good to read. */
prof_bt_t *prod_bt = &tctx->gctx->bt;
new_node->next = NULL;
new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
prod_thr_name);
new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
cons_thr_name);
new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
new_node->alloc_time_ns = nstime_ns(&alloc_time);
new_node->free_time_ns = nstime_ns(&free_time);
new_node->usize = usize;
if (log_alloc_first == NULL) {
log_alloc_first = new_node;
log_alloc_last = new_node;
} else {
log_alloc_last->next = new_node;
log_alloc_last = new_node;
}
label_done:
malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
}
static void
prof_bt_node_hash(const void *key, size_t r_hash[2]) {
const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
prof_bt_hash((void *)(&bt_node->bt), r_hash);
}
static bool
prof_bt_node_keycomp(const void *k1, const void *k2) {
const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
return prof_bt_keycomp((void *)(&bt_node1->bt),
(void *)(&bt_node2->bt));
}
static void
prof_thr_node_hash(const void *key, size_t r_hash[2]) {
const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
}
static bool
prof_thr_node_keycomp(const void *k1, const void *k2) {
const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
return thr_node1->thr_uid == thr_node2->thr_uid;
}
/* Used in unit tests. */
size_t
prof_log_bt_count(void) {
cassert(config_prof);
size_t cnt = 0;
prof_bt_node_t *node = log_bt_first;
while (node != NULL) {
cnt++;
node = node->next;
}
return cnt;
}
/* Used in unit tests. */
size_t
prof_log_alloc_count(void) {
cassert(config_prof);
size_t cnt = 0;
prof_alloc_node_t *node = log_alloc_first;
while (node != NULL) {
cnt++;
node = node->next;
}
return cnt;
}
/* Used in unit tests. */
size_t
prof_log_thr_count(void) {
cassert(config_prof);
size_t cnt = 0;
prof_thr_node_t *node = log_thr_first;
while (node != NULL) {
cnt++;
node = node->next;
}
return cnt;
}
/* Used in unit tests. */
bool
prof_log_is_logging(void) {
cassert(config_prof);
return prof_logging_state == prof_logging_state_started;
}
/* Used in unit tests. */
bool
prof_log_rep_check(void) {
cassert(config_prof);
if (prof_logging_state == prof_logging_state_stopped
&& log_tables_initialized) {
return true;
}
if (log_bt_last != NULL && log_bt_last->next != NULL) {
return true;
}
if (log_thr_last != NULL && log_thr_last->next != NULL) {
return true;
}
if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
return true;
}
size_t bt_count = prof_log_bt_count();
size_t thr_count = prof_log_thr_count();
size_t alloc_count = prof_log_alloc_count();
if (prof_logging_state == prof_logging_state_stopped) {
		if (bt_count != 0 || thr_count != 0 || alloc_count != 0) {
return true;
}
}
prof_alloc_node_t *node = log_alloc_first;
while (node != NULL) {
if (node->alloc_bt_ind >= bt_count) {
return true;
}
if (node->free_bt_ind >= bt_count) {
return true;
}
if (node->alloc_thr_ind >= thr_count) {
return true;
}
if (node->free_thr_ind >= thr_count) {
return true;
}
if (node->alloc_time_ns > node->free_time_ns) {
return true;
}
node = node->next;
}
return false;
}
/* Used in unit tests. */
void
prof_log_dummy_set(bool new_value) {
cassert(config_prof);
prof_log_dummy = new_value;
}
/* Used as an atexit function to stop logging on exit. */
static void
prof_log_stop_final(void) {
tsd_t *tsd = tsd_fetch();
prof_log_stop(tsd_tsdn(tsd));
}
JEMALLOC_COLD
bool
prof_log_start(tsdn_t *tsdn, const char *filename) {
cassert(config_prof);
if (!opt_prof) {
return true;
}
bool ret = false;
malloc_mutex_lock(tsdn, &log_mtx);
static bool prof_log_atexit_called = false;
if (!prof_log_atexit_called) {
prof_log_atexit_called = true;
if (atexit(prof_log_stop_final) != 0) {
malloc_write("<jemalloc>: Error in atexit() "
"for logging\n");
if (opt_abort) {
abort();
}
ret = true;
goto label_done;
}
}
if (prof_logging_state != prof_logging_state_stopped) {
ret = true;
} else if (filename == NULL) {
/* Make default name. */
prof_get_default_filename(tsdn, log_filename, log_seq);
log_seq++;
prof_logging_state = prof_logging_state_started;
} else if (strlen(filename) >= PROF_DUMP_FILENAME_LEN) {
ret = true;
} else {
strcpy(log_filename, filename);
prof_logging_state = prof_logging_state_started;
}
if (!ret) {
nstime_prof_init_update(&log_start_timestamp);
}
label_done:
malloc_mutex_unlock(tsdn, &log_mtx);
return ret;
}
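/*
 * Illustrative usage, assuming the "prof.log_start"/"prof.log_stop"
 * mallctl entry points wired up elsewhere (not shown in this diff):
 *
 *     const char *fname = "prof_log.json";
 *     mallctl("prof.log_start", NULL, NULL, (void *)&fname, sizeof(fname));
 *     ... workload ...
 *     mallctl("prof.log_stop", NULL, NULL, NULL, 0);
 */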
struct prof_emitter_cb_arg_s {
int fd;
ssize_t ret;
};
static void
prof_emitter_write_cb(void *opaque, const char *to_write) {
struct prof_emitter_cb_arg_s *arg =
(struct prof_emitter_cb_arg_s *)opaque;
size_t bytes = strlen(to_write);
if (prof_log_dummy) {
return;
}
arg->ret = malloc_write_fd(arg->fd, to_write, bytes);
}
/*
* prof_log_emit_{...} goes through the appropriate linked list, emitting each
* node to the json and deallocating it.
*/
static void
prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
emitter_json_array_kv_begin(emitter, "threads");
prof_thr_node_t *thr_node = log_thr_first;
prof_thr_node_t *thr_old_node;
while (thr_node != NULL) {
emitter_json_object_begin(emitter);
emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
&thr_node->thr_uid);
char *thr_name = thr_node->name;
emitter_json_kv(emitter, "thr_name", emitter_type_string,
&thr_name);
emitter_json_object_end(emitter);
thr_old_node = thr_node;
thr_node = thr_node->next;
idalloctm(tsd_tsdn(tsd), thr_old_node, NULL, NULL, true, true);
}
emitter_json_array_end(emitter);
}
static void
prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
emitter_json_array_kv_begin(emitter, "stack_traces");
prof_bt_node_t *bt_node = log_bt_first;
prof_bt_node_t *bt_old_node;
/*
 * Calculate how many characters we need: two hex digits per pointer byte,
 * two for the "0x" prefix, and one more for the terminating '\0'.
*/
char buf[2 * sizeof(intptr_t) + 3];
size_t buf_sz = sizeof(buf);
while (bt_node != NULL) {
emitter_json_array_begin(emitter);
size_t i;
for (i = 0; i < bt_node->bt.len; i++) {
malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
char *trace_str = buf;
emitter_json_value(emitter, emitter_type_string,
&trace_str);
}
emitter_json_array_end(emitter);
bt_old_node = bt_node;
bt_node = bt_node->next;
idalloctm(tsd_tsdn(tsd), bt_old_node, NULL, NULL, true, true);
}
emitter_json_array_end(emitter);
}
static void
prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
emitter_json_array_kv_begin(emitter, "allocations");
prof_alloc_node_t *alloc_node = log_alloc_first;
prof_alloc_node_t *alloc_old_node;
while (alloc_node != NULL) {
emitter_json_object_begin(emitter);
emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
&alloc_node->alloc_thr_ind);
emitter_json_kv(emitter, "free_thread", emitter_type_size,
&alloc_node->free_thr_ind);
emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
&alloc_node->alloc_bt_ind);
emitter_json_kv(emitter, "free_trace", emitter_type_size,
&alloc_node->free_bt_ind);
emitter_json_kv(emitter, "alloc_timestamp",
emitter_type_uint64, &alloc_node->alloc_time_ns);
emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
&alloc_node->free_time_ns);
emitter_json_kv(emitter, "usize", emitter_type_uint64,
&alloc_node->usize);
emitter_json_object_end(emitter);
alloc_old_node = alloc_node;
alloc_node = alloc_node->next;
idalloctm(tsd_tsdn(tsd), alloc_old_node, NULL, NULL, true,
true);
}
emitter_json_array_end(emitter);
}
static void
prof_log_emit_metadata(emitter_t *emitter) {
emitter_json_object_kv_begin(emitter, "info");
nstime_t now;
nstime_prof_init_update(&now);
uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);
char *vers = JEMALLOC_VERSION;
emitter_json_kv(emitter, "version",
emitter_type_string, &vers);
emitter_json_kv(emitter, "lg_sample_rate",
emitter_type_int, &lg_prof_sample);
const char *res_type = prof_time_res_mode_names[opt_prof_time_res];
emitter_json_kv(emitter, "prof_time_resolution", emitter_type_string,
&res_type);
int pid = prof_getpid();
emitter_json_kv(emitter, "pid", emitter_type_int, &pid);
emitter_json_object_end(emitter);
}
#define PROF_LOG_STOP_BUFSIZE PROF_DUMP_BUFSIZE
JEMALLOC_COLD
bool
prof_log_stop(tsdn_t *tsdn) {
cassert(config_prof);
if (!opt_prof || !prof_booted) {
return true;
}
tsd_t *tsd = tsdn_tsd(tsdn);
malloc_mutex_lock(tsdn, &log_mtx);
if (prof_logging_state != prof_logging_state_started) {
malloc_mutex_unlock(tsdn, &log_mtx);
return true;
}
/*
* Set the state to dumping. We'll set it to stopped when we're done.
* Since other threads won't be able to start/stop/log when the state is
* dumping, we don't have to hold the lock during the whole method.
*/
prof_logging_state = prof_logging_state_dumping;
malloc_mutex_unlock(tsdn, &log_mtx);
emitter_t emitter;
/* Create a file. */
int fd;
if (prof_log_dummy) {
fd = 0;
} else {
fd = creat(log_filename, 0644);
}
if (fd == -1) {
malloc_printf("<jemalloc>: creat() for log file \"%s\" "
" failed with %d\n", log_filename, errno);
if (opt_abort) {
abort();
}
return true;
}
struct prof_emitter_cb_arg_s arg;
arg.fd = fd;
buf_writer_t buf_writer;
buf_writer_init(tsdn, &buf_writer, prof_emitter_write_cb, &arg, NULL,
PROF_LOG_STOP_BUFSIZE);
emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
&buf_writer);
emitter_begin(&emitter);
prof_log_emit_metadata(&emitter);
prof_log_emit_threads(tsd, &emitter);
prof_log_emit_traces(tsd, &emitter);
prof_log_emit_allocs(tsd, &emitter);
emitter_end(&emitter);
buf_writer_terminate(tsdn, &buf_writer);
/* Reset global state. */
if (log_tables_initialized) {
ckh_delete(tsd, &log_bt_node_set);
ckh_delete(tsd, &log_thr_node_set);
}
log_tables_initialized = false;
log_bt_index = 0;
log_thr_index = 0;
log_bt_first = NULL;
log_bt_last = NULL;
log_thr_first = NULL;
log_thr_last = NULL;
log_alloc_first = NULL;
log_alloc_last = NULL;
malloc_mutex_lock(tsdn, &log_mtx);
prof_logging_state = prof_logging_state_stopped;
malloc_mutex_unlock(tsdn, &log_mtx);
if (prof_log_dummy) {
return false;
}
return close(fd) || arg.ret == -1;
}
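/*
 * Illustrative shape of the emitted JSON (compact in reality; the values
 * below are made up, the key names come from the emit calls above):
 *
 * {
 *   "info": {"duration": 123456789, "version": "...", "lg_sample_rate": 19,
 *            "prof_time_resolution": "default", "pid": 4242},
 *   "threads": [{"thr_uid": 0, "thr_name": "main"}],
 *   "stack_traces": [["0x55d0e3b1a120", "0x55d0e3b19f40"]],
 *   "allocations": [{"alloc_thread": 0, "free_thread": 0, "alloc_trace": 0,
 *                    "free_trace": 1, "alloc_timestamp": 1000,
 *                    "free_timestamp": 2000, "usize": 4096}]
 * }
 */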
#undef PROF_LOG_STOP_BUFSIZE
JEMALLOC_COLD
bool
prof_log_init(tsd_t *tsd) {
cassert(config_prof);
if (malloc_mutex_init(&log_mtx, "prof_log",
WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
return true;
}
if (opt_prof_log) {
prof_log_start(tsd_tsdn(tsd), NULL);
}
return false;
}
/******************************************************************************/
@ -0,0 +1,600 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/emitter.h"
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_recent.h"
ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT;
malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */
static atomic_zd_t prof_recent_alloc_max;
static ssize_t prof_recent_alloc_count = 0;
prof_recent_list_t prof_recent_alloc_list;
malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. */
static void
prof_recent_alloc_max_init() {
atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max,
ATOMIC_RELAXED);
}
static inline ssize_t
prof_recent_alloc_max_get_no_lock() {
return atomic_load_zd(&prof_recent_alloc_max, ATOMIC_RELAXED);
}
static inline ssize_t
prof_recent_alloc_max_get(tsd_t *tsd) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
return prof_recent_alloc_max_get_no_lock();
}
static inline ssize_t
prof_recent_alloc_max_update(tsd_t *tsd, ssize_t max) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
ssize_t old_max = prof_recent_alloc_max_get(tsd);
atomic_store_zd(&prof_recent_alloc_max, max, ATOMIC_RELAXED);
return old_max;
}
static prof_recent_t *
prof_recent_allocate_node(tsdn_t *tsdn) {
return (prof_recent_t *)iallocztm(tsdn, sizeof(prof_recent_t),
sz_size2index(sizeof(prof_recent_t)), false, NULL, true,
arena_get(tsdn, 0, false), true);
}
static void
prof_recent_free_node(tsdn_t *tsdn, prof_recent_t *node) {
assert(node != NULL);
assert(isalloc(tsdn, node) == sz_s2u(sizeof(prof_recent_t)));
idalloctm(tsdn, node, NULL, NULL, true, true);
}
static inline void
increment_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
++tctx->recent_count;
assert(tctx->recent_count > 0);
}
bool
prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx) {
cassert(config_prof);
assert(opt_prof && prof_booted);
malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
/*
* Check whether last-N mode is turned on without trying to acquire the
* lock, so as to optimize for the following two scenarios:
* (1) Last-N mode is switched off;
* (2) Dumping, during which last-N mode is temporarily turned off so
* as not to block sampled allocations.
*/
if (prof_recent_alloc_max_get_no_lock() == 0) {
return false;
}
/*
* Increment recent_count to hold the tctx so that it won't be gone
* even after tctx->tdata->lock is released. This acts as a
* "placeholder"; the real recording of the allocation requires a lock
* on prof_recent_alloc_mtx and is done in prof_recent_alloc (when
* tctx->tdata->lock has been released).
*/
increment_recent_count(tsd, tctx);
return true;
}
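/*
 * Illustrative call sequence, not an additional API: for a sampled
 * allocation with last-N recording enabled, the expected order is
 *   prof_recent_alloc_prepare()  - under tdata->lock, pins the tctx;
 *   prof_recent_alloc()          - under prof_recent_alloc_mtx, records it;
 *   prof_recent_alloc_reset()    - at deallocation, fills in dalloc info;
 * with decrement_recent_count() dropping the pin once a record is evicted
 * or rolled back.
 */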
static void
decrement_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert(tctx != NULL);
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
assert(tctx->recent_count > 0);
--tctx->recent_count;
prof_tctx_try_destroy(tsd, tctx);
}
static inline edata_t *
prof_recent_alloc_edata_get_no_lock(const prof_recent_t *n) {
return (edata_t *)atomic_load_p(&n->alloc_edata, ATOMIC_ACQUIRE);
}
edata_t *
prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *n) {
cassert(config_prof);
return prof_recent_alloc_edata_get_no_lock(n);
}
static inline edata_t *
prof_recent_alloc_edata_get(tsd_t *tsd, const prof_recent_t *n) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
return prof_recent_alloc_edata_get_no_lock(n);
}
static void
prof_recent_alloc_edata_set(tsd_t *tsd, prof_recent_t *n, edata_t *edata) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
atomic_store_p(&n->alloc_edata, edata, ATOMIC_RELEASE);
}
void
edata_prof_recent_alloc_init(edata_t *edata) {
cassert(config_prof);
edata_prof_recent_alloc_set_dont_call_directly(edata, NULL);
}
static inline prof_recent_t *
edata_prof_recent_alloc_get_no_lock(const edata_t *edata) {
cassert(config_prof);
return edata_prof_recent_alloc_get_dont_call_directly(edata);
}
prof_recent_t *
edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata) {
cassert(config_prof);
return edata_prof_recent_alloc_get_no_lock(edata);
}
static inline prof_recent_t *
edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_t *recent_alloc =
edata_prof_recent_alloc_get_no_lock(edata);
assert(recent_alloc == NULL ||
prof_recent_alloc_edata_get(tsd, recent_alloc) == edata);
return recent_alloc;
}
static prof_recent_t *
edata_prof_recent_alloc_update_internal(tsd_t *tsd, edata_t *edata,
prof_recent_t *recent_alloc) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_t *old_recent_alloc =
edata_prof_recent_alloc_get(tsd, edata);
edata_prof_recent_alloc_set_dont_call_directly(edata, recent_alloc);
return old_recent_alloc;
}
static void
edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata,
prof_recent_t *recent_alloc) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert(recent_alloc != NULL);
prof_recent_t *old_recent_alloc =
edata_prof_recent_alloc_update_internal(tsd, edata, recent_alloc);
assert(old_recent_alloc == NULL);
prof_recent_alloc_edata_set(tsd, recent_alloc, edata);
}
static void
edata_prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata,
prof_recent_t *recent_alloc) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert(recent_alloc != NULL);
prof_recent_t *old_recent_alloc =
edata_prof_recent_alloc_update_internal(tsd, edata, NULL);
assert(old_recent_alloc == recent_alloc);
assert(edata == prof_recent_alloc_edata_get(tsd, recent_alloc));
prof_recent_alloc_edata_set(tsd, recent_alloc, NULL);
}
/*
* This function should be called right before an allocation is released, so
* that the associated recent allocation record can contain the following
* information:
* (1) The allocation is released;
* (2) The time of the deallocation; and
* (3) The prof_tctx associated with the deallocation.
*/
void
prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata) {
cassert(config_prof);
/*
* Check whether the recent allocation record still exists without
* trying to acquire the lock.
*/
if (edata_prof_recent_alloc_get_no_lock(edata) == NULL) {
return;
}
prof_tctx_t *dalloc_tctx = prof_tctx_create(tsd);
/*
* In case dalloc_tctx is NULL, e.g. due to OOM, we will not record the
* deallocation time / tctx, which is handled later, after we check
* again when holding the lock.
*/
if (dalloc_tctx != NULL) {
malloc_mutex_lock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
increment_recent_count(tsd, dalloc_tctx);
dalloc_tctx->prepared = false;
malloc_mutex_unlock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
}
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
/* Check again after acquiring the lock. */
prof_recent_t *recent = edata_prof_recent_alloc_get(tsd, edata);
if (recent != NULL) {
assert(nstime_equals_zero(&recent->dalloc_time));
assert(recent->dalloc_tctx == NULL);
if (dalloc_tctx != NULL) {
nstime_prof_update(&recent->dalloc_time);
recent->dalloc_tctx = dalloc_tctx;
dalloc_tctx = NULL;
}
edata_prof_recent_alloc_reset(tsd, edata, recent);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
if (dalloc_tctx != NULL) {
		/* We lost the race - the allocation record was just gone. */
decrement_recent_count(tsd, dalloc_tctx);
}
}
static void
prof_recent_alloc_evict_edata(tsd_t *tsd, prof_recent_t *recent_alloc) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
edata_t *edata = prof_recent_alloc_edata_get(tsd, recent_alloc);
if (edata != NULL) {
edata_prof_recent_alloc_reset(tsd, edata, recent_alloc);
}
}
static bool
prof_recent_alloc_is_empty(tsd_t *tsd) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
if (ql_empty(&prof_recent_alloc_list)) {
assert(prof_recent_alloc_count == 0);
return true;
} else {
assert(prof_recent_alloc_count > 0);
return false;
}
}
static void
prof_recent_alloc_assert_count(tsd_t *tsd) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
if (!config_debug) {
return;
}
ssize_t count = 0;
prof_recent_t *n;
ql_foreach(n, &prof_recent_alloc_list, link) {
++count;
}
assert(count == prof_recent_alloc_count);
assert(prof_recent_alloc_max_get(tsd) == -1 ||
count <= prof_recent_alloc_max_get(tsd));
}
void
prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) {
cassert(config_prof);
assert(edata != NULL);
prof_tctx_t *tctx = edata_prof_tctx_get(edata);
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_alloc_assert_count(tsd);
/*
* Reserve a new prof_recent_t node if needed. If needed, we release
* the prof_recent_alloc_mtx lock and allocate. Then, rather than
* immediately checking for OOM, we regain the lock and try to make use
* of the reserve node if needed. There are six scenarios:
*
 *           \  now  |  no need  |  need but OOMed  |  need and allocated
 *    later   \      |           |                  |
 * ---------------------------------------------------------------------
 *    no need        |    (1)    |       (2)        |         (3)
 * ---------------------------------------------------------------------
 *    need           |    (4)    |       (5)        |         (6)
*
* First, "(4)" never happens, because we don't release the lock in the
* middle if there's no need for a new node; in such cases "(1)" always
* takes place, which is trivial.
*
* Out of the remaining four scenarios, "(6)" is the common case and is
* trivial. "(5)" is also trivial, in which case we'll rollback the
* effect of prof_recent_alloc_prepare() as expected.
*
* "(2)" / "(3)" occurs when the need for a new node is gone after we
* regain the lock. If the new node is successfully allocated, i.e. in
* the case of "(3)", we'll release it in the end; otherwise, i.e. in
* the case of "(2)", we do nothing - we're lucky that the OOM ends up
* doing no harm at all.
*
* Therefore, the only performance cost of the "release lock" ->
* "allocate" -> "regain lock" design is the "(3)" case, but it happens
* very rarely, so the cost is relatively small compared to the gain of
* not having to place prof_recent_alloc_mtx above all the allocation
* locks in the lock order.
*/
prof_recent_t *reserve = NULL;
if (prof_recent_alloc_max_get(tsd) == -1 ||
prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) {
assert(prof_recent_alloc_max_get(tsd) != 0);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
reserve = prof_recent_allocate_node(tsd_tsdn(tsd));
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_alloc_assert_count(tsd);
}
if (prof_recent_alloc_max_get(tsd) == 0) {
assert(prof_recent_alloc_is_empty(tsd));
goto label_rollback;
}
prof_tctx_t *old_alloc_tctx, *old_dalloc_tctx;
if (prof_recent_alloc_count == prof_recent_alloc_max_get(tsd)) {
/* If upper limit is reached, rotate the head. */
assert(prof_recent_alloc_max_get(tsd) != -1);
assert(!prof_recent_alloc_is_empty(tsd));
prof_recent_t *head = ql_first(&prof_recent_alloc_list);
old_alloc_tctx = head->alloc_tctx;
assert(old_alloc_tctx != NULL);
old_dalloc_tctx = head->dalloc_tctx;
prof_recent_alloc_evict_edata(tsd, head);
ql_rotate(&prof_recent_alloc_list, link);
} else {
/* Otherwise make use of the new node. */
assert(prof_recent_alloc_max_get(tsd) == -1 ||
prof_recent_alloc_count < prof_recent_alloc_max_get(tsd));
if (reserve == NULL) {
goto label_rollback;
}
ql_elm_new(reserve, link);
ql_tail_insert(&prof_recent_alloc_list, reserve, link);
reserve = NULL;
old_alloc_tctx = NULL;
old_dalloc_tctx = NULL;
++prof_recent_alloc_count;
}
/* Fill content into the tail node. */
prof_recent_t *tail = ql_last(&prof_recent_alloc_list, link);
assert(tail != NULL);
tail->size = size;
tail->usize = usize;
nstime_copy(&tail->alloc_time, edata_prof_alloc_time_get(edata));
tail->alloc_tctx = tctx;
nstime_init_zero(&tail->dalloc_time);
tail->dalloc_tctx = NULL;
edata_prof_recent_alloc_set(tsd, edata, tail);
assert(!prof_recent_alloc_is_empty(tsd));
prof_recent_alloc_assert_count(tsd);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
if (reserve != NULL) {
prof_recent_free_node(tsd_tsdn(tsd), reserve);
}
/*
* Asynchronously handle the tctx of the old node, so that there's no
* simultaneous holdings of prof_recent_alloc_mtx and tdata->lock.
* In the worst case this may delay the tctx release but it's better
* than holding prof_recent_alloc_mtx for longer.
*/
if (old_alloc_tctx != NULL) {
decrement_recent_count(tsd, old_alloc_tctx);
}
if (old_dalloc_tctx != NULL) {
decrement_recent_count(tsd, old_dalloc_tctx);
}
return;
label_rollback:
assert(edata_prof_recent_alloc_get(tsd, edata) == NULL);
prof_recent_alloc_assert_count(tsd);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
if (reserve != NULL) {
prof_recent_free_node(tsd_tsdn(tsd), reserve);
}
decrement_recent_count(tsd, tctx);
}
ssize_t
prof_recent_alloc_max_ctl_read() {
cassert(config_prof);
/* Don't bother to acquire the lock. */
return prof_recent_alloc_max_get_no_lock();
}
static void
prof_recent_alloc_restore_locked(tsd_t *tsd, prof_recent_list_t *to_delete) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
ssize_t max = prof_recent_alloc_max_get(tsd);
if (max == -1 || prof_recent_alloc_count <= max) {
/* Easy case - no need to alter the list. */
ql_new(to_delete);
prof_recent_alloc_assert_count(tsd);
return;
}
prof_recent_t *node;
ql_foreach(node, &prof_recent_alloc_list, link) {
if (prof_recent_alloc_count == max) {
break;
}
prof_recent_alloc_evict_edata(tsd, node);
--prof_recent_alloc_count;
}
assert(prof_recent_alloc_count == max);
ql_move(to_delete, &prof_recent_alloc_list);
if (max == 0) {
assert(node == NULL);
} else {
assert(node != NULL);
ql_split(to_delete, node, &prof_recent_alloc_list, link);
}
assert(!ql_empty(to_delete));
prof_recent_alloc_assert_count(tsd);
}
static void
prof_recent_alloc_async_cleanup(tsd_t *tsd, prof_recent_list_t *to_delete) {
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_dump_mtx);
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
while (!ql_empty(to_delete)) {
prof_recent_t *node = ql_first(to_delete);
ql_remove(to_delete, node, link);
decrement_recent_count(tsd, node->alloc_tctx);
if (node->dalloc_tctx != NULL) {
decrement_recent_count(tsd, node->dalloc_tctx);
}
prof_recent_free_node(tsd_tsdn(tsd), node);
}
}
ssize_t
prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) {
cassert(config_prof);
assert(max >= -1);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_alloc_assert_count(tsd);
const ssize_t old_max = prof_recent_alloc_max_update(tsd, max);
prof_recent_list_t to_delete;
prof_recent_alloc_restore_locked(tsd, &to_delete);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_alloc_async_cleanup(tsd, &to_delete);
return old_max;
}
static void
prof_recent_alloc_dump_bt(emitter_t *emitter, prof_tctx_t *tctx) {
char bt_buf[2 * sizeof(intptr_t) + 3];
char *s = bt_buf;
assert(tctx != NULL);
prof_bt_t *bt = &tctx->gctx->bt;
for (size_t i = 0; i < bt->len; ++i) {
malloc_snprintf(bt_buf, sizeof(bt_buf), "%p", bt->vec[i]);
emitter_json_value(emitter, emitter_type_string, &s);
}
}
static void
prof_recent_alloc_dump_node(emitter_t *emitter, prof_recent_t *node) {
emitter_json_object_begin(emitter);
emitter_json_kv(emitter, "size", emitter_type_size, &node->size);
emitter_json_kv(emitter, "usize", emitter_type_size, &node->usize);
bool released = prof_recent_alloc_edata_get_no_lock(node) == NULL;
emitter_json_kv(emitter, "released", emitter_type_bool, &released);
emitter_json_kv(emitter, "alloc_thread_uid", emitter_type_uint64,
&node->alloc_tctx->thr_uid);
prof_tdata_t *alloc_tdata = node->alloc_tctx->tdata;
assert(alloc_tdata != NULL);
if (alloc_tdata->thread_name != NULL) {
emitter_json_kv(emitter, "alloc_thread_name",
emitter_type_string, &alloc_tdata->thread_name);
}
uint64_t alloc_time_ns = nstime_ns(&node->alloc_time);
emitter_json_kv(emitter, "alloc_time", emitter_type_uint64,
&alloc_time_ns);
emitter_json_array_kv_begin(emitter, "alloc_trace");
prof_recent_alloc_dump_bt(emitter, node->alloc_tctx);
emitter_json_array_end(emitter);
if (released && node->dalloc_tctx != NULL) {
emitter_json_kv(emitter, "dalloc_thread_uid",
emitter_type_uint64, &node->dalloc_tctx->thr_uid);
prof_tdata_t *dalloc_tdata = node->dalloc_tctx->tdata;
assert(dalloc_tdata != NULL);
if (dalloc_tdata->thread_name != NULL) {
emitter_json_kv(emitter, "dalloc_thread_name",
emitter_type_string, &dalloc_tdata->thread_name);
}
assert(!nstime_equals_zero(&node->dalloc_time));
uint64_t dalloc_time_ns = nstime_ns(&node->dalloc_time);
emitter_json_kv(emitter, "dalloc_time", emitter_type_uint64,
&dalloc_time_ns);
emitter_json_array_kv_begin(emitter, "dalloc_trace");
prof_recent_alloc_dump_bt(emitter, node->dalloc_tctx);
emitter_json_array_end(emitter);
}
emitter_json_object_end(emitter);
}
#define PROF_RECENT_PRINT_BUFSIZE 65536
JEMALLOC_COLD
void
prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
cassert(config_prof);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
buf_writer_t buf_writer;
buf_writer_init(tsd_tsdn(tsd), &buf_writer, write_cb, cbopaque, NULL,
PROF_RECENT_PRINT_BUFSIZE);
emitter_t emitter;
emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
&buf_writer);
prof_recent_list_t temp_list;
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_alloc_assert_count(tsd);
ssize_t dump_max = prof_recent_alloc_max_get(tsd);
ql_move(&temp_list, &prof_recent_alloc_list);
ssize_t dump_count = prof_recent_alloc_count;
prof_recent_alloc_count = 0;
prof_recent_alloc_assert_count(tsd);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
emitter_begin(&emitter);
uint64_t sample_interval = (uint64_t)1U << lg_prof_sample;
emitter_json_kv(&emitter, "sample_interval", emitter_type_uint64,
&sample_interval);
emitter_json_kv(&emitter, "recent_alloc_max", emitter_type_ssize,
&dump_max);
emitter_json_array_kv_begin(&emitter, "recent_alloc");
prof_recent_t *node;
ql_foreach(node, &temp_list, link) {
prof_recent_alloc_dump_node(&emitter, node);
}
emitter_json_array_end(&emitter);
emitter_end(&emitter);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_alloc_assert_count(tsd);
ql_concat(&temp_list, &prof_recent_alloc_list, link);
ql_move(&prof_recent_alloc_list, &temp_list);
prof_recent_alloc_count += dump_count;
prof_recent_alloc_restore_locked(tsd, &temp_list);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
prof_recent_alloc_async_cleanup(tsd, &temp_list);
}
#undef PROF_RECENT_PRINT_BUFSIZE
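/*
 * Reconstructed from the emitter calls above, the compact JSON produced by
 * prof_recent_alloc_dump() is shaped roughly as follows (all concrete values
 * below are invented, and optional fields such as "alloc_thread_name" are
 * omitted):
 *
 *   {"sample_interval": 524288, "recent_alloc_max": 4, "recent_alloc": [
 *     {"size": 4096, "usize": 4096, "released": true,
 *      "alloc_thread_uid": 1, "alloc_time": 123456789,
 *      "alloc_trace": ["0x401234", "0x401567"],
 *      "dalloc_thread_uid": 1, "dalloc_time": 123456999,
 *      "dalloc_trace": ["0x402345"]}]}
 */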
bool
prof_recent_init() {
cassert(config_prof);
prof_recent_alloc_max_init();
if (malloc_mutex_init(&prof_recent_alloc_mtx, "prof_recent_alloc",
WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_recent_dump_mtx, "prof_recent_dump",
WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) {
return true;
}
ql_new(&prof_recent_alloc_list);
return false;
}


@ -0,0 +1,57 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/prof_stats.h"
bool opt_prof_stats = false;
malloc_mutex_t prof_stats_mtx;
static prof_stats_t prof_stats_live[PROF_SC_NSIZES];
static prof_stats_t prof_stats_accum[PROF_SC_NSIZES];
static void
prof_stats_enter(tsd_t *tsd, szind_t ind) {
assert(opt_prof && opt_prof_stats);
assert(ind < SC_NSIZES);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_stats_mtx);
}
static void
prof_stats_leave(tsd_t *tsd) {
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_stats_mtx);
}
void
prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size) {
cassert(config_prof);
prof_stats_enter(tsd, ind);
prof_stats_live[ind].req_sum += size;
prof_stats_live[ind].count++;
prof_stats_accum[ind].req_sum += size;
prof_stats_accum[ind].count++;
prof_stats_leave(tsd);
}
void
prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size) {
cassert(config_prof);
prof_stats_enter(tsd, ind);
prof_stats_live[ind].req_sum -= size;
prof_stats_live[ind].count--;
prof_stats_leave(tsd);
}
void
prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats) {
cassert(config_prof);
prof_stats_enter(tsd, ind);
memcpy(stats, &prof_stats_live[ind], sizeof(prof_stats_t));
prof_stats_leave(tsd);
}
void
prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats) {
cassert(config_prof);
prof_stats_enter(tsd, ind);
memcpy(stats, &prof_stats_accum[ind], sizeof(prof_stats_t));
prof_stats_leave(tsd);
}
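/*
 * A worked example of the live/accum split above (sizes invented): after
 * prof_stats_inc(tsd, ind, 100), prof_stats_inc(tsd, ind, 200) and
 * prof_stats_dec(tsd, ind, 100), prof_stats_live[ind] holds
 * {req_sum = 200, count = 1} while prof_stats_accum[ind] still holds
 * {req_sum = 300, count = 2}: accum only ever grows, whereas live tracks
 * the sampled allocations currently outstanding.
 */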


@ -0,0 +1,669 @@
#define JEMALLOC_PROF_SYS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_sys.h"
#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif
#ifdef JEMALLOC_PROF_LIBGCC
/*
* We have a circular dependency -- jemalloc_internal.h tells us if we should
* use libgcc's unwinding functionality, but after we've included that, we've
* already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
*/
#undef _Unwind_Backtrace
#include <unwind.h>
#define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
#endif
/******************************************************************************/
malloc_mutex_t prof_dump_filename_mtx;
bool prof_do_mock = false;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;
static char *prof_prefix = NULL;
/* The fallback allocator profiling functionality will use. */
base_t *prof_base;
void
bt_init(prof_bt_t *bt, void **vec) {
cassert(config_prof);
bt->vec = vec;
bt->len = 0;
}
#ifdef JEMALLOC_PROF_LIBUNWIND
static void
prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
int nframes;
cassert(config_prof);
assert(*len == 0);
assert(vec != NULL);
assert(max_len == PROF_BT_MAX);
nframes = unw_backtrace(vec, PROF_BT_MAX);
if (nframes <= 0) {
return;
}
*len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
cassert(config_prof);
return _URC_NO_REASON;
}
static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
void *ip;
cassert(config_prof);
ip = (void *)_Unwind_GetIP(context);
if (ip == NULL) {
return _URC_END_OF_STACK;
}
data->vec[*data->len] = ip;
(*data->len)++;
if (*data->len == data->max) {
return _URC_END_OF_STACK;
}
return _URC_NO_REASON;
}
static void
prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
prof_unwind_data_t data = {vec, len, max_len};
cassert(config_prof);
assert(vec != NULL);
assert(max_len == PROF_BT_MAX);
_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
static void
prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
#define BT_FRAME(i) \
if ((i) < max_len) { \
void *p; \
if (__builtin_frame_address(i) == 0) { \
return; \
} \
p = __builtin_return_address(i); \
if (p == NULL) { \
return; \
} \
vec[(i)] = p; \
*len = (i) + 1; \
} else { \
return; \
}
cassert(config_prof);
assert(vec != NULL);
assert(max_len == PROF_BT_MAX);
BT_FRAME(0)
BT_FRAME(1)
BT_FRAME(2)
BT_FRAME(3)
BT_FRAME(4)
BT_FRAME(5)
BT_FRAME(6)
BT_FRAME(7)
BT_FRAME(8)
BT_FRAME(9)
BT_FRAME(10)
BT_FRAME(11)
BT_FRAME(12)
BT_FRAME(13)
BT_FRAME(14)
BT_FRAME(15)
BT_FRAME(16)
BT_FRAME(17)
BT_FRAME(18)
BT_FRAME(19)
BT_FRAME(20)
BT_FRAME(21)
BT_FRAME(22)
BT_FRAME(23)
BT_FRAME(24)
BT_FRAME(25)
BT_FRAME(26)
BT_FRAME(27)
BT_FRAME(28)
BT_FRAME(29)
BT_FRAME(30)
BT_FRAME(31)
BT_FRAME(32)
BT_FRAME(33)
BT_FRAME(34)
BT_FRAME(35)
BT_FRAME(36)
BT_FRAME(37)
BT_FRAME(38)
BT_FRAME(39)
BT_FRAME(40)
BT_FRAME(41)
BT_FRAME(42)
BT_FRAME(43)
BT_FRAME(44)
BT_FRAME(45)
BT_FRAME(46)
BT_FRAME(47)
BT_FRAME(48)
BT_FRAME(49)
BT_FRAME(50)
BT_FRAME(51)
BT_FRAME(52)
BT_FRAME(53)
BT_FRAME(54)
BT_FRAME(55)
BT_FRAME(56)
BT_FRAME(57)
BT_FRAME(58)
BT_FRAME(59)
BT_FRAME(60)
BT_FRAME(61)
BT_FRAME(62)
BT_FRAME(63)
BT_FRAME(64)
BT_FRAME(65)
BT_FRAME(66)
BT_FRAME(67)
BT_FRAME(68)
BT_FRAME(69)
BT_FRAME(70)
BT_FRAME(71)
BT_FRAME(72)
BT_FRAME(73)
BT_FRAME(74)
BT_FRAME(75)
BT_FRAME(76)
BT_FRAME(77)
BT_FRAME(78)
BT_FRAME(79)
BT_FRAME(80)
BT_FRAME(81)
BT_FRAME(82)
BT_FRAME(83)
BT_FRAME(84)
BT_FRAME(85)
BT_FRAME(86)
BT_FRAME(87)
BT_FRAME(88)
BT_FRAME(89)
BT_FRAME(90)
BT_FRAME(91)
BT_FRAME(92)
BT_FRAME(93)
BT_FRAME(94)
BT_FRAME(95)
BT_FRAME(96)
BT_FRAME(97)
BT_FRAME(98)
BT_FRAME(99)
BT_FRAME(100)
BT_FRAME(101)
BT_FRAME(102)
BT_FRAME(103)
BT_FRAME(104)
BT_FRAME(105)
BT_FRAME(106)
BT_FRAME(107)
BT_FRAME(108)
BT_FRAME(109)
BT_FRAME(110)
BT_FRAME(111)
BT_FRAME(112)
BT_FRAME(113)
BT_FRAME(114)
BT_FRAME(115)
BT_FRAME(116)
BT_FRAME(117)
BT_FRAME(118)
BT_FRAME(119)
BT_FRAME(120)
BT_FRAME(121)
BT_FRAME(122)
BT_FRAME(123)
BT_FRAME(124)
BT_FRAME(125)
BT_FRAME(126)
BT_FRAME(127)
#undef BT_FRAME
}
#else
static void
prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
cassert(config_prof);
not_reached();
}
#endif
void
prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
cassert(config_prof);
prof_backtrace_hook_t prof_backtrace_hook = prof_backtrace_hook_get();
assert(prof_backtrace_hook != NULL);
pre_reentrancy(tsd, NULL);
prof_backtrace_hook(bt->vec, &bt->len, PROF_BT_MAX);
post_reentrancy(tsd);
}
void
prof_hooks_init() {
prof_backtrace_hook_set(&prof_backtrace_impl);
prof_dump_hook_set(NULL);
}
void
prof_unwind_init() {
#ifdef JEMALLOC_PROF_LIBGCC
/*
* Cause the backtracing machinery to allocate its internal
* state before enabling profiling.
*/
_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif
}
static int
prof_sys_thread_name_read_impl(char *buf, size_t limit) {
#if defined(JEMALLOC_HAVE_PTHREAD_GETNAME_NP)
return pthread_getname_np(pthread_self(), buf, limit);
#elif defined(JEMALLOC_HAVE_PTHREAD_GET_NAME_NP)
pthread_get_name_np(pthread_self(), buf, limit);
return 0;
#else
return ENOSYS;
#endif
}
prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read =
prof_sys_thread_name_read_impl;
void
prof_sys_thread_name_fetch(tsd_t *tsd) {
#define THREAD_NAME_MAX_LEN 16
char buf[THREAD_NAME_MAX_LEN];
if (!prof_sys_thread_name_read(buf, THREAD_NAME_MAX_LEN)) {
prof_thread_name_set_impl(tsd, buf);
}
#undef THREAD_NAME_MAX_LEN
}
int
prof_getpid(void) {
#ifdef _WIN32
return GetCurrentProcessId();
#else
return getpid();
#endif
}
/*
* This buffer is rather large for stack allocation, so use a single buffer for
* all profile dumps; protected by prof_dump_mtx.
*/
static char prof_dump_buf[PROF_DUMP_BUFSIZE];
typedef struct prof_dump_arg_s prof_dump_arg_t;
struct prof_dump_arg_s {
/*
* Whether errors should be handled locally: if true, then we print an
* error message and abort (if opt_abort is true) when an error occurs,
* and we also report the error back to the caller in the end; if false,
* then we only report the error back to the caller in the end.
*/
const bool handle_error_locally;
/*
* Whether there has been an error in the dumping process, which could
* have happened either in file opening or in file writing. When an
* error has already occurred, we will stop further writing to the file.
*/
bool error;
/* File descriptor of the dump file. */
int prof_dump_fd;
};
static void
prof_dump_check_possible_error(prof_dump_arg_t *arg, bool err_cond,
const char *format, ...) {
assert(!arg->error);
if (!err_cond) {
return;
}
arg->error = true;
if (!arg->handle_error_locally) {
return;
}
va_list ap;
char buf[PROF_PRINTF_BUFSIZE];
va_start(ap, format);
malloc_vsnprintf(buf, sizeof(buf), format, ap);
va_end(ap);
malloc_write(buf);
if (opt_abort) {
abort();
}
}
static int
prof_dump_open_file_impl(const char *filename, int mode) {
return creat(filename, mode);
}
prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file =
prof_dump_open_file_impl;
static void
prof_dump_open(prof_dump_arg_t *arg, const char *filename) {
arg->prof_dump_fd = prof_dump_open_file(filename, 0644);
prof_dump_check_possible_error(arg, arg->prof_dump_fd == -1,
"<jemalloc>: failed to open \"%s\"\n", filename);
}
prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file = malloc_write_fd;
static void
prof_dump_flush(void *opaque, const char *s) {
cassert(config_prof);
prof_dump_arg_t *arg = (prof_dump_arg_t *)opaque;
if (!arg->error) {
ssize_t err = prof_dump_write_file(arg->prof_dump_fd, s,
strlen(s));
prof_dump_check_possible_error(arg, err == -1,
"<jemalloc>: failed to write during heap profile flush\n");
}
}
static void
prof_dump_close(prof_dump_arg_t *arg) {
if (arg->prof_dump_fd != -1) {
close(arg->prof_dump_fd);
}
}
#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
prof_open_maps_internal(const char *format, ...) {
int mfd;
va_list ap;
char filename[PATH_MAX + 1];
va_start(ap, format);
malloc_vsnprintf(filename, sizeof(filename), format, ap);
va_end(ap);
#if defined(O_CLOEXEC)
mfd = open(filename, O_RDONLY | O_CLOEXEC);
#else
mfd = open(filename, O_RDONLY);
if (mfd != -1) {
fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
}
#endif
return mfd;
}
#endif
static int
prof_dump_open_maps_impl() {
int mfd;
cassert(config_prof);
#if defined(__FreeBSD__) || defined(__DragonFly__)
mfd = prof_open_maps_internal("/proc/curproc/map");
#elif defined(_WIN32)
mfd = -1; /* Not implemented. */
#else
int pid = prof_getpid();
mfd = prof_open_maps_internal("/proc/%d/task/%d/maps", pid, pid);
if (mfd == -1) {
mfd = prof_open_maps_internal("/proc/%d/maps", pid);
}
#endif
return mfd;
}
prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps =
prof_dump_open_maps_impl;
static ssize_t
prof_dump_read_maps_cb(void *read_cbopaque, void *buf, size_t limit) {
int mfd = *(int *)read_cbopaque;
assert(mfd != -1);
return malloc_read_fd(mfd, buf, limit);
}
static void
prof_dump_maps(buf_writer_t *buf_writer) {
int mfd = prof_dump_open_maps();
if (mfd == -1) {
return;
}
buf_writer_cb(buf_writer, "\nMAPPED_LIBRARIES:\n");
buf_writer_pipe(buf_writer, prof_dump_read_maps_cb, &mfd);
close(mfd);
}
static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
bool leakcheck) {
cassert(config_prof);
assert(tsd_reentrancy_level_get(tsd) == 0);
prof_tdata_t * tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return true;
}
prof_dump_arg_t arg = {/* handle_error_locally */ !propagate_err,
/* error */ false, /* prof_dump_fd */ -1};
pre_reentrancy(tsd, NULL);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
prof_dump_open(&arg, filename);
buf_writer_t buf_writer;
bool err = buf_writer_init(tsd_tsdn(tsd), &buf_writer, prof_dump_flush,
&arg, prof_dump_buf, PROF_DUMP_BUFSIZE);
assert(!err);
prof_dump_impl(tsd, buf_writer_cb, &buf_writer, tdata, leakcheck);
prof_dump_maps(&buf_writer);
buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
prof_dump_close(&arg);
prof_dump_hook_t dump_hook = prof_dump_hook_get();
if (dump_hook != NULL) {
dump_hook(filename);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
post_reentrancy(tsd);
return arg.error;
}
/*
* If profiling is off, then PROF_DUMP_FILENAME_LEN is 1, so we'll end up
* calling strncpy with a size of 0, which triggers a -Wstringop-truncation
* warning (strncpy can never actually be called in this case, since we bail out
* much earlier when config_prof is false). This function works around the
* false positive so that the warning can stay enabled.
*/
static inline void
prof_strncpy(char *UNUSED dest, const char *UNUSED src, size_t UNUSED size) {
cassert(config_prof);
#ifdef JEMALLOC_PROF
strncpy(dest, src, size);
#endif
}
static const char *
prof_prefix_get(tsdn_t* tsdn) {
malloc_mutex_assert_owner(tsdn, &prof_dump_filename_mtx);
return prof_prefix == NULL ? opt_prof_prefix : prof_prefix;
}
static bool
prof_prefix_is_empty(tsdn_t *tsdn) {
malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
bool ret = (prof_prefix_get(tsdn)[0] == '\0');
malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
return ret;
}
#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) {
cassert(config_prof);
assert(tsd_reentrancy_level_get(tsd) == 0);
const char *prefix = prof_prefix_get(tsd_tsdn(tsd));
if (vseq != VSEQ_INVALID) {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c%"FMTu64".heap", prefix, prof_getpid(),
prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c.heap", prefix, prof_getpid(),
prof_dump_seq, v);
}
prof_dump_seq++;
}
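/*
 * For instance (values invented): with prefix "jeprof", pid 4242 and
 * prof_dump_seq == 7, prof_dump_filename(tsd, buf, 'i', 3) produces
 * "jeprof.4242.7.i3.heap", while passing VSEQ_INVALID with v == 'f'
 * produces "jeprof.4242.7.f.heap".
 */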
void
prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind) {
malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
malloc_snprintf(filename, PROF_DUMP_FILENAME_LEN,
"%s.%d.%"FMTu64".json", prof_prefix_get(tsdn), prof_getpid(), ind);
malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
}
void
prof_fdump_impl(tsd_t *tsd) {
char filename[DUMP_FILENAME_BUFSIZE];
assert(!prof_prefix_is_empty(tsd_tsdn(tsd)));
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
prof_dump_filename(tsd, filename, 'f', VSEQ_INVALID);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
prof_dump(tsd, false, filename, opt_prof_leak);
}
bool
prof_prefix_set(tsdn_t *tsdn, const char *prefix) {
cassert(config_prof);
ctl_mtx_assert_held(tsdn);
malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
if (prof_prefix == NULL) {
malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
/* Everything is still guarded by ctl_mtx. */
char *buffer = base_alloc(tsdn, prof_base,
PROF_DUMP_FILENAME_LEN, QUANTUM);
if (buffer == NULL) {
return true;
}
malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
prof_prefix = buffer;
}
assert(prof_prefix != NULL);
prof_strncpy(prof_prefix, prefix, PROF_DUMP_FILENAME_LEN - 1);
prof_prefix[PROF_DUMP_FILENAME_LEN - 1] = '\0';
malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
return false;
}
void
prof_idump_impl(tsd_t *tsd) {
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
return;
}
char filename[PATH_MAX + 1];
prof_dump_filename(tsd, filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
prof_dump(tsd, false, filename, false);
}
bool
prof_mdump_impl(tsd_t *tsd, const char *filename) {
char filename_buf[DUMP_FILENAME_BUFSIZE];
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
return true;
}
prof_dump_filename(tsd, filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
filename = filename_buf;
}
return prof_dump(tsd, true, filename, false);
}
void
prof_gdump_impl(tsd_t *tsd) {
tsdn_t *tsdn = tsd_tsdn(tsd);
malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
if (prof_prefix_get(tsdn)[0] == '\0') {
malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
return;
}
char filename[DUMP_FILENAME_BUFSIZE];
prof_dump_filename(tsd, filename, 'u', prof_dump_useq);
prof_dump_useq++;
malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
prof_dump(tsd, false, filename, false);
}

385
BeefRT/JEMalloc/src/psset.c Normal file

@ -0,0 +1,385 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/psset.h"
#include "jemalloc/internal/fb.h"
void
psset_init(psset_t *psset) {
for (unsigned i = 0; i < PSSET_NPSIZES; i++) {
hpdata_age_heap_new(&psset->pageslabs[i]);
}
fb_init(psset->pageslab_bitmap, PSSET_NPSIZES);
memset(&psset->merged_stats, 0, sizeof(psset->merged_stats));
memset(&psset->stats, 0, sizeof(psset->stats));
hpdata_empty_list_init(&psset->empty);
for (int i = 0; i < PSSET_NPURGE_LISTS; i++) {
hpdata_purge_list_init(&psset->to_purge[i]);
}
fb_init(psset->purge_bitmap, PSSET_NPURGE_LISTS);
hpdata_hugify_list_init(&psset->to_hugify);
}
static void
psset_bin_stats_accum(psset_bin_stats_t *dst, psset_bin_stats_t *src) {
dst->npageslabs += src->npageslabs;
dst->nactive += src->nactive;
dst->ndirty += src->ndirty;
}
void
psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) {
psset_bin_stats_accum(&dst->full_slabs[0], &src->full_slabs[0]);
psset_bin_stats_accum(&dst->full_slabs[1], &src->full_slabs[1]);
psset_bin_stats_accum(&dst->empty_slabs[0], &src->empty_slabs[0]);
psset_bin_stats_accum(&dst->empty_slabs[1], &src->empty_slabs[1]);
for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
psset_bin_stats_accum(&dst->nonfull_slabs[i][0],
&src->nonfull_slabs[i][0]);
psset_bin_stats_accum(&dst->nonfull_slabs[i][1],
&src->nonfull_slabs[i][1]);
}
}
/*
* The stats maintenance strategy is to remove a pageslab's contribution to the
* stats when we call psset_update_begin, and re-add it (to a potentially new
* bin) when we call psset_update_end.
*/
JEMALLOC_ALWAYS_INLINE void
psset_bin_stats_insert_remove(psset_t *psset, psset_bin_stats_t *binstats,
hpdata_t *ps, bool insert) {
size_t mul = insert ? (size_t)1 : (size_t)-1;
size_t huge_idx = (size_t)hpdata_huge_get(ps);
binstats[huge_idx].npageslabs += mul * 1;
binstats[huge_idx].nactive += mul * hpdata_nactive_get(ps);
binstats[huge_idx].ndirty += mul * hpdata_ndirty_get(ps);
psset->merged_stats.npageslabs += mul * 1;
psset->merged_stats.nactive += mul * hpdata_nactive_get(ps);
psset->merged_stats.ndirty += mul * hpdata_ndirty_get(ps);
if (config_debug) {
psset_bin_stats_t check_stats = {0};
for (size_t huge = 0; huge <= 1; huge++) {
psset_bin_stats_accum(&check_stats,
&psset->stats.full_slabs[huge]);
psset_bin_stats_accum(&check_stats,
&psset->stats.empty_slabs[huge]);
for (pszind_t pind = 0; pind < PSSET_NPSIZES; pind++) {
psset_bin_stats_accum(&check_stats,
&psset->stats.nonfull_slabs[pind][huge]);
}
}
assert(psset->merged_stats.npageslabs
== check_stats.npageslabs);
assert(psset->merged_stats.nactive == check_stats.nactive);
assert(psset->merged_stats.ndirty == check_stats.ndirty);
}
}
static void
psset_bin_stats_insert(psset_t *psset, psset_bin_stats_t *binstats,
hpdata_t *ps) {
psset_bin_stats_insert_remove(psset, binstats, ps, true);
}
static void
psset_bin_stats_remove(psset_t *psset, psset_bin_stats_t *binstats,
hpdata_t *ps) {
psset_bin_stats_insert_remove(psset, binstats, ps, false);
}
static void
psset_hpdata_heap_remove(psset_t *psset, pszind_t pind, hpdata_t *ps) {
hpdata_age_heap_remove(&psset->pageslabs[pind], ps);
if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
fb_unset(psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)pind);
}
}
static void
psset_hpdata_heap_insert(psset_t *psset, pszind_t pind, hpdata_t *ps) {
if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
fb_set(psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)pind);
}
hpdata_age_heap_insert(&psset->pageslabs[pind], ps);
}
static void
psset_stats_insert(psset_t* psset, hpdata_t *ps) {
if (hpdata_empty(ps)) {
psset_bin_stats_insert(psset, psset->stats.empty_slabs, ps);
} else if (hpdata_full(ps)) {
psset_bin_stats_insert(psset, psset->stats.full_slabs, ps);
} else {
size_t longest_free_range = hpdata_longest_free_range_get(ps);
pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
longest_free_range << LG_PAGE));
assert(pind < PSSET_NPSIZES);
psset_bin_stats_insert(psset, psset->stats.nonfull_slabs[pind],
ps);
}
}
static void
psset_stats_remove(psset_t *psset, hpdata_t *ps) {
if (hpdata_empty(ps)) {
psset_bin_stats_remove(psset, psset->stats.empty_slabs, ps);
} else if (hpdata_full(ps)) {
psset_bin_stats_remove(psset, psset->stats.full_slabs, ps);
} else {
size_t longest_free_range = hpdata_longest_free_range_get(ps);
pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
longest_free_range << LG_PAGE));
assert(pind < PSSET_NPSIZES);
psset_bin_stats_remove(psset, psset->stats.nonfull_slabs[pind],
ps);
}
}
/*
* Put ps into some container so that it can be found during future allocation
* requests.
*/
static void
psset_alloc_container_insert(psset_t *psset, hpdata_t *ps) {
assert(!hpdata_in_psset_alloc_container_get(ps));
hpdata_in_psset_alloc_container_set(ps, true);
if (hpdata_empty(ps)) {
/*
* This prepend, paired with popping the head in psset_fit,
* means we implement LIFO ordering for the empty slabs set,
* which seems reasonable.
*/
hpdata_empty_list_prepend(&psset->empty, ps);
} else if (hpdata_full(ps)) {
/*
* We don't need to keep track of the full slabs; we're never
* going to return them from a psset_pick_alloc call.
*/
} else {
size_t longest_free_range = hpdata_longest_free_range_get(ps);
pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
longest_free_range << LG_PAGE));
assert(pind < PSSET_NPSIZES);
psset_hpdata_heap_insert(psset, pind, ps);
}
}
/* Remove ps from those collections. */
static void
psset_alloc_container_remove(psset_t *psset, hpdata_t *ps) {
assert(hpdata_in_psset_alloc_container_get(ps));
hpdata_in_psset_alloc_container_set(ps, false);
if (hpdata_empty(ps)) {
hpdata_empty_list_remove(&psset->empty, ps);
} else if (hpdata_full(ps)) {
/* Same as above -- do nothing in this case. */
} else {
size_t longest_free_range = hpdata_longest_free_range_get(ps);
pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
longest_free_range << LG_PAGE));
assert(pind < PSSET_NPSIZES);
psset_hpdata_heap_remove(psset, pind, ps);
}
}
static size_t
psset_purge_list_ind(hpdata_t *ps) {
size_t ndirty = hpdata_ndirty_get(ps);
/* Shouldn't have something with no dirty pages purgeable. */
assert(ndirty > 0);
/*
* Higher indices correspond to lists we'd like to purge earlier; make
* the two highest indices correspond to empty lists, which we attempt
* to purge before purging any non-empty list. This has two advantages:
* - Empty page slabs are the least likely to get reused (we'll only
* pick them for an allocation if we have no other choice).
* - Empty page slabs can purge every dirty page they contain in a
* single call, which is not usually the case.
*
* We purge hugified empty slabs before non-hugified ones, on the basis
* that they are fully dirty, while non-hugified slabs might not be, so
* we free up more pages more easily.
*/
if (hpdata_nactive_get(ps) == 0) {
if (hpdata_huge_get(ps)) {
return PSSET_NPURGE_LISTS - 1;
} else {
return PSSET_NPURGE_LISTS - 2;
}
}
pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(ndirty << LG_PAGE));
/*
* For non-empty slabs, we may reuse them again. Among slabs of similar
* dirtiness, prefer purging non-hugified ones before hugified ones; we
* still get some benefit from the hugification.
*/
return (size_t)pind * 2 + (hpdata_huge_get(ps) ? 0 : 1);
}
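/*
 * Concretely: an empty hugified slab maps to list PSSET_NPURGE_LISTS - 1, an
 * empty non-hugified slab to PSSET_NPURGE_LISTS - 2, and a non-empty slab
 * whose dirty size quantizes to pszind pind maps to 2 * pind + 1 when
 * non-hugified or 2 * pind when hugified; since higher indices are purged
 * first, equally dirty non-hugified slabs are purged before hugified ones.
 */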
static void
psset_maybe_remove_purge_list(psset_t *psset, hpdata_t *ps) {
/*
* Remove the hpdata from its purge list (if it's in one). Even if it's
* going to stay in the same one, by appending it during
* psset_update_end, we move it to the end of its queue, so that we
* purge LRU within a given dirtiness bucket.
*/
if (hpdata_purge_allowed_get(ps)) {
size_t ind = psset_purge_list_ind(ps);
hpdata_purge_list_t *purge_list = &psset->to_purge[ind];
hpdata_purge_list_remove(purge_list, ps);
if (hpdata_purge_list_empty(purge_list)) {
fb_unset(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind);
}
}
}
static void
psset_maybe_insert_purge_list(psset_t *psset, hpdata_t *ps) {
if (hpdata_purge_allowed_get(ps)) {
size_t ind = psset_purge_list_ind(ps);
hpdata_purge_list_t *purge_list = &psset->to_purge[ind];
if (hpdata_purge_list_empty(purge_list)) {
fb_set(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind);
}
hpdata_purge_list_append(purge_list, ps);
}
}
void
psset_update_begin(psset_t *psset, hpdata_t *ps) {
hpdata_assert_consistent(ps);
assert(hpdata_in_psset_get(ps));
hpdata_updating_set(ps, true);
psset_stats_remove(psset, ps);
if (hpdata_in_psset_alloc_container_get(ps)) {
/*
* Some metadata updates can break alloc container invariants
* (e.g. the longest free range determines the hpdata_heap_t the
* pageslab lives in).
*/
assert(hpdata_alloc_allowed_get(ps));
psset_alloc_container_remove(psset, ps);
}
psset_maybe_remove_purge_list(psset, ps);
/*
* We don't update presence in the hugify list; we try to keep it FIFO,
* even in the presence of other metadata updates. We'll update
* presence at the end of the metadata update if necessary.
*/
}
void
psset_update_end(psset_t *psset, hpdata_t *ps) {
assert(hpdata_in_psset_get(ps));
hpdata_updating_set(ps, false);
psset_stats_insert(psset, ps);
/*
* The update begin should have removed ps from whatever alloc container
* it was in.
*/
assert(!hpdata_in_psset_alloc_container_get(ps));
if (hpdata_alloc_allowed_get(ps)) {
psset_alloc_container_insert(psset, ps);
}
psset_maybe_insert_purge_list(psset, ps);
if (hpdata_hugify_allowed_get(ps)
&& !hpdata_in_psset_hugify_container_get(ps)) {
hpdata_in_psset_hugify_container_set(ps, true);
hpdata_hugify_list_append(&psset->to_hugify, ps);
} else if (!hpdata_hugify_allowed_get(ps)
&& hpdata_in_psset_hugify_container_get(ps)) {
hpdata_in_psset_hugify_container_set(ps, false);
hpdata_hugify_list_remove(&psset->to_hugify, ps);
}
hpdata_assert_consistent(ps);
}
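/*
 * A minimal sketch of the calling pattern implied by the stats-maintenance
 * comment above; the mutation in the middle stands in for whatever metadata
 * change the caller performs:
 *
 *	psset_update_begin(psset, ps);
 *	... mutate ps (active ranges, dirty pages, purge/hugify hints) ...
 *	psset_update_end(psset, ps);
 *
 * Between begin and end the pageslab is excluded from the stats and from the
 * alloc/purge containers, so the caller may transiently break invariants
 * such as the longest-free-range binning.
 */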
hpdata_t *
psset_pick_alloc(psset_t *psset, size_t size) {
assert((size & PAGE_MASK) == 0);
assert(size <= HUGEPAGE);
pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size));
pszind_t pind = (pszind_t)fb_ffs(psset->pageslab_bitmap, PSSET_NPSIZES,
(size_t)min_pind);
if (pind == PSSET_NPSIZES) {
return hpdata_empty_list_first(&psset->empty);
}
hpdata_t *ps = hpdata_age_heap_first(&psset->pageslabs[pind]);
if (ps == NULL) {
return NULL;
}
hpdata_assert_consistent(ps);
return ps;
}
hpdata_t *
psset_pick_purge(psset_t *psset) {
ssize_t ind_ssz = fb_fls(psset->purge_bitmap, PSSET_NPURGE_LISTS,
PSSET_NPURGE_LISTS - 1);
if (ind_ssz < 0) {
return NULL;
}
pszind_t ind = (pszind_t)ind_ssz;
assert(ind < PSSET_NPURGE_LISTS);
hpdata_t *ps = hpdata_purge_list_first(&psset->to_purge[ind]);
assert(ps != NULL);
return ps;
}
hpdata_t *
psset_pick_hugify(psset_t *psset) {
return hpdata_hugify_list_first(&psset->to_hugify);
}
void
psset_insert(psset_t *psset, hpdata_t *ps) {
hpdata_in_psset_set(ps, true);
psset_stats_insert(psset, ps);
if (hpdata_alloc_allowed_get(ps)) {
psset_alloc_container_insert(psset, ps);
}
psset_maybe_insert_purge_list(psset, ps);
if (hpdata_hugify_allowed_get(ps)) {
hpdata_in_psset_hugify_container_set(ps, true);
hpdata_hugify_list_append(&psset->to_hugify, ps);
}
}
void
psset_remove(psset_t *psset, hpdata_t *ps) {
hpdata_in_psset_set(ps, false);
psset_stats_remove(psset, ps);
if (hpdata_in_psset_alloc_container_get(ps)) {
psset_alloc_container_remove(psset, ps);
}
psset_maybe_remove_purge_list(psset, ps);
if (hpdata_in_psset_hugify_container_get(ps)) {
hpdata_in_psset_hugify_container_set(ps, false);
hpdata_hugify_list_remove(&psset->to_hugify, ps);
}
}

261
BeefRT/JEMalloc/src/rtree.c Normal file

@ -0,0 +1,261 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
/*
* Only the most significant bits of keys passed to rtree_{read,write}() are
* used.
*/
bool
rtree_new(rtree_t *rtree, base_t *base, bool zeroed) {
#ifdef JEMALLOC_JET
if (!zeroed) {
memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
}
#else
assert(zeroed);
#endif
rtree->base = base;
if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
malloc_mutex_rank_exclusive)) {
return true;
}
return false;
}
static rtree_node_elm_t *
rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
return (rtree_node_elm_t *)base_alloc(tsdn, rtree->base,
nelms * sizeof(rtree_node_elm_t), CACHELINE);
}
static rtree_leaf_elm_t *
rtree_leaf_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
return (rtree_leaf_elm_t *)base_alloc(tsdn, rtree->base,
nelms * sizeof(rtree_leaf_elm_t), CACHELINE);
}
static rtree_node_elm_t *
rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
atomic_p_t *elmp) {
malloc_mutex_lock(tsdn, &rtree->init_lock);
/*
* If *elmp is non-null, then it was initialized with the init lock
* held, so we can get by with 'relaxed' here.
*/
rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
if (node == NULL) {
node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
rtree_levels[level].bits);
if (node == NULL) {
malloc_mutex_unlock(tsdn, &rtree->init_lock);
return NULL;
}
/*
* Even though we hold the lock, a later reader might not; we
* need release semantics.
*/
atomic_store_p(elmp, node, ATOMIC_RELEASE);
}
malloc_mutex_unlock(tsdn, &rtree->init_lock);
return node;
}
static rtree_leaf_elm_t *
rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
malloc_mutex_lock(tsdn, &rtree->init_lock);
/*
* If *elmp is non-null, then it was initialized with the init lock
* held, so we can get by with 'relaxed' here.
*/
rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
if (leaf == NULL) {
leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
rtree_levels[RTREE_HEIGHT-1].bits);
if (leaf == NULL) {
malloc_mutex_unlock(tsdn, &rtree->init_lock);
return NULL;
}
/*
* Even though we hold the lock, a later reader might not; we
* need release semantics.
*/
atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
}
malloc_mutex_unlock(tsdn, &rtree->init_lock);
return leaf;
}
static bool
rtree_node_valid(rtree_node_elm_t *node) {
return ((uintptr_t)node != (uintptr_t)0);
}
static bool
rtree_leaf_valid(rtree_leaf_elm_t *leaf) {
return ((uintptr_t)leaf != (uintptr_t)0);
}
static rtree_node_elm_t *
rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
rtree_node_elm_t *node;
if (dependent) {
node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
ATOMIC_RELAXED);
} else {
node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
ATOMIC_ACQUIRE);
}
assert(!dependent || node != NULL);
return node;
}
static rtree_node_elm_t *
rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
unsigned level, bool dependent) {
rtree_node_elm_t *node;
node = rtree_child_node_tryread(elm, dependent);
if (!dependent && unlikely(!rtree_node_valid(node))) {
node = rtree_node_init(tsdn, rtree, level + 1, &elm->child);
}
assert(!dependent || node != NULL);
return node;
}
static rtree_leaf_elm_t *
rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
rtree_leaf_elm_t *leaf;
if (dependent) {
leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
ATOMIC_RELAXED);
} else {
leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
ATOMIC_ACQUIRE);
}
assert(!dependent || leaf != NULL);
return leaf;
}
static rtree_leaf_elm_t *
rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
unsigned level, bool dependent) {
rtree_leaf_elm_t *leaf;
leaf = rtree_child_leaf_tryread(elm, dependent);
if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {
leaf = rtree_leaf_init(tsdn, rtree, &elm->child);
}
assert(!dependent || leaf != NULL);
return leaf;
}
rtree_leaf_elm_t *
rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, bool init_missing) {
rtree_node_elm_t *node;
rtree_leaf_elm_t *leaf;
#if RTREE_HEIGHT > 1
node = rtree->root;
#else
leaf = rtree->root;
#endif
if (config_debug) {
uintptr_t leafkey = rtree_leafkey(key);
for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
assert(rtree_ctx->cache[i].leafkey != leafkey);
}
for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
assert(rtree_ctx->l2_cache[i].leafkey != leafkey);
}
}
#define RTREE_GET_CHILD(level) { \
assert(level < RTREE_HEIGHT-1); \
if (level != 0 && !dependent && \
unlikely(!rtree_node_valid(node))) { \
return NULL; \
} \
uintptr_t subkey = rtree_subkey(key, level); \
if (level + 2 < RTREE_HEIGHT) { \
node = init_missing ? \
rtree_child_node_read(tsdn, rtree, \
&node[subkey], level, dependent) : \
rtree_child_node_tryread(&node[subkey], \
dependent); \
} else { \
leaf = init_missing ? \
rtree_child_leaf_read(tsdn, rtree, \
&node[subkey], level, dependent) : \
rtree_child_leaf_tryread(&node[subkey], \
dependent); \
} \
}
/*
* Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss):
* (1) evict last entry in L2 cache; (2) move the collision slot from L1
* cache down to L2; and (3) fill L1.
*/
#define RTREE_GET_LEAF(level) { \
assert(level == RTREE_HEIGHT-1); \
if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \
return NULL; \
} \
if (RTREE_CTX_NCACHE_L2 > 1) { \
memmove(&rtree_ctx->l2_cache[1], \
&rtree_ctx->l2_cache[0], \
sizeof(rtree_ctx_cache_elm_t) * \
(RTREE_CTX_NCACHE_L2 - 1)); \
} \
size_t slot = rtree_cache_direct_map(key); \
rtree_ctx->l2_cache[0].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[0].leaf = \
rtree_ctx->cache[slot].leaf; \
uintptr_t leafkey = rtree_leafkey(key); \
rtree_ctx->cache[slot].leafkey = leafkey; \
rtree_ctx->cache[slot].leaf = leaf; \
uintptr_t subkey = rtree_subkey(key, level); \
return &leaf[subkey]; \
}
if (RTREE_HEIGHT > 1) {
RTREE_GET_CHILD(0)
}
if (RTREE_HEIGHT > 2) {
RTREE_GET_CHILD(1)
}
if (RTREE_HEIGHT > 3) {
for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
RTREE_GET_CHILD(i)
}
}
RTREE_GET_LEAF(RTREE_HEIGHT-1)
#undef RTREE_GET_CHILD
#undef RTREE_GET_LEAF
not_reached();
}
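/*
 * Tracing the cache replacement in RTREE_GET_LEAF on an L1 and L2 miss: the
 * L2 cache shifts down by one entry (dropping its last), the entry that
 * occupied the key's direct-mapped L1 slot moves into l2_cache[0], and the
 * L1 slot is refilled with the new leafkey/leaf pair before the leaf element
 * for the key is returned.
 */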
void
rtree_ctx_data_init(rtree_ctx_t *ctx) {
for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
rtree_ctx_cache_elm_t *cache = &ctx->cache[i];
cache->leafkey = RTREE_LEAFKEY_INVALID;
cache->leaf = NULL;
}
for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i];
cache->leafkey = RTREE_LEAFKEY_INVALID;
cache->leaf = NULL;
}
}


@ -0,0 +1,36 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
static safety_check_abort_hook_t safety_check_abort;
void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
size_t true_size, size_t input_size) {
char *src = current_dealloc ? "the current pointer being freed" :
"in thread cache, possibly from previous deallocations";
safety_check_fail("<jemalloc>: size mismatch detected (true size %zu "
"vs input size %zu), likely caused by application sized "
"deallocation bugs (source address: %p, %s). Suggest building with "
"--enable-debug or address sanitizer for debugging. Abort.\n",
true_size, input_size, ptr, src);
}
void safety_check_set_abort(safety_check_abort_hook_t abort_fn) {
safety_check_abort = abort_fn;
}
void safety_check_fail(const char *format, ...) {
char buf[MALLOC_PRINTF_BUFSIZE];
va_list ap;
va_start(ap, format);
malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap);
va_end(ap);
if (safety_check_abort == NULL) {
malloc_write(buf);
abort();
} else {
safety_check_abort(buf);
}
}

208
BeefRT/JEMalloc/src/san.c Normal file

@ -0,0 +1,208 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/tsd.h"
/* The sanitizer options. */
size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT;
size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT;
/* Aligned (-1 is off) ptrs will be junked & stashed on dealloc. */
ssize_t opt_lg_san_uaf_align = SAN_LG_UAF_ALIGN_DEFAULT;
/*
* Initialized in san_init(). When disabled, the mask is set to (uintptr_t)-1
* to always fail the nonfast_align check.
*/
uintptr_t san_cache_bin_nonfast_mask = SAN_CACHE_BIN_NONFAST_MASK_DEFAULT;
static inline void
san_find_guarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
uintptr_t *addr, size_t size, bool left, bool right) {
assert(!edata_guarded_get(edata));
assert(size % PAGE == 0);
*addr = (uintptr_t)edata_base_get(edata);
if (left) {
*guard1 = *addr;
*addr += SAN_PAGE_GUARD;
} else {
*guard1 = 0;
}
if (right) {
*guard2 = *addr + size;
} else {
*guard2 = 0;
}
}
static inline void
san_find_unguarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
uintptr_t *addr, size_t size, bool left, bool right) {
assert(edata_guarded_get(edata));
assert(size % PAGE == 0);
*addr = (uintptr_t)edata_base_get(edata);
if (right) {
*guard2 = *addr + size;
} else {
*guard2 = 0;
}
if (left) {
*guard1 = *addr - SAN_PAGE_GUARD;
assert(*guard1 != 0);
*addr = *guard1;
} else {
*guard1 = 0;
}
}
void
san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap,
bool left, bool right, bool remap) {
assert(left || right);
if (remap) {
emap_deregister_boundary(tsdn, emap, edata);
}
size_t size_with_guards = edata_size_get(edata);
size_t usize = (left && right)
? san_two_side_unguarded_sz(size_with_guards)
: san_one_side_unguarded_sz(size_with_guards);
uintptr_t guard1, guard2, addr;
san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left,
right);
assert(edata_state_get(edata) == extent_state_active);
ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2);
/* Update the guarded addr and usable size of the edata. */
edata_size_set(edata, usize);
edata_addr_set(edata, (void *)addr);
edata_guarded_set(edata, true);
if (remap) {
emap_register_boundary(tsdn, emap, edata, SC_NSIZES,
/* slab */ false);
}
}
static void
san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap, bool left, bool right, bool remap) {
assert(left || right);
/* Remove the inner boundary which no longer exists. */
if (remap) {
assert(edata_state_get(edata) == extent_state_active);
emap_deregister_boundary(tsdn, emap, edata);
} else {
assert(edata_state_get(edata) == extent_state_retained);
}
size_t size = edata_size_get(edata);
size_t size_with_guards = (left && right)
? san_two_side_guarded_sz(size)
: san_one_side_guarded_sz(size);
uintptr_t guard1, guard2, addr;
san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left,
right);
ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2);
/* Update the true addr and usable size of the edata. */
edata_size_set(edata, size_with_guards);
edata_addr_set(edata, (void *)addr);
edata_guarded_set(edata, false);
/*
* Then re-register the outer boundary including the guards, if
* requested.
*/
if (remap) {
emap_register_boundary(tsdn, emap, edata, SC_NSIZES,
/* slab */ false);
}
}
void
san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap, bool left, bool right) {
san_unguard_pages_impl(tsdn, ehooks, edata, emap, left, right,
/* remap */ true);
}
void
san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap) {
emap_assert_not_mapped(tsdn, emap, edata);
/*
* We don't want to touch the emap of about-to-be-destroyed extents, as
* they have been unmapped upon eviction from the retained ecache. Also,
* we unguard the extents to the right, because retained extents only
* own their right guard page per san_bump_alloc's logic.
*/
san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false,
/* right */ true, /* remap */ false);
}
static bool
san_stashed_corrupted(void *ptr, size_t size) {
if (san_junk_ptr_should_slow()) {
for (size_t i = 0; i < size; i++) {
if (((char *)ptr)[i] != (char)uaf_detect_junk) {
return true;
}
}
return false;
}
void *first, *mid, *last;
san_junk_ptr_locations(ptr, size, &first, &mid, &last);
if (*(uintptr_t *)first != uaf_detect_junk ||
*(uintptr_t *)mid != uaf_detect_junk ||
*(uintptr_t *)last != uaf_detect_junk) {
return true;
}
return false;
}
void
san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize) {
/*
* Verify that the junk-filled & stashed pointers remain unchanged, to
* detect write-after-free.
*/
for (size_t n = 0; n < nstashed; n++) {
void *stashed = ptrs[n];
assert(stashed != NULL);
assert(cache_bin_nonfast_aligned(stashed));
if (unlikely(san_stashed_corrupted(stashed, usize))) {
safety_check_fail("<jemalloc>: Write-after-free "
"detected on deallocated pointer %p (size %zu).\n",
stashed, usize);
}
}
}
void
tsd_san_init(tsd_t *tsd) {
*tsd_san_extents_until_guard_smallp_get(tsd) = opt_san_guard_small;
*tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large;
}
void
san_init(ssize_t lg_san_uaf_align) {
assert(lg_san_uaf_align == -1 || lg_san_uaf_align >= LG_PAGE);
if (lg_san_uaf_align == -1) {
san_cache_bin_nonfast_mask = (uintptr_t)-1;
return;
}
san_cache_bin_nonfast_mask = ((uintptr_t)1 << lg_san_uaf_align) - 1;
}
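/*
 * For example, on a system with 4 KiB pages, setting lg_san_uaf_align to
 * LG_PAGE (12) yields a mask of 0xfff, so only page-aligned pointers satisfy
 * the nonfast_align check and take the UAF-detection path; with the default
 * of -1 the mask is (uintptr_t)-1, which no nonzero pointer can satisfy, so
 * the check always fails and the fast path is left untouched.
 */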


@ -0,0 +1,104 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/san_bump.h"
#include "jemalloc/internal/pac.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/edata_cache.h"
static bool
san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
ehooks_t *ehooks, size_t size);
edata_t *
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
ehooks_t *ehooks, size_t size, bool zero) {
assert(san_bump_enabled());
edata_t* to_destroy;
size_t guarded_size = san_one_side_guarded_sz(size);
malloc_mutex_lock(tsdn, &sba->mtx);
if (sba->curr_reg == NULL ||
edata_size_get(sba->curr_reg) < guarded_size) {
/*
* If the current region can't accommodate the allocation,
* try replacing it with a larger one and destroy current if the
* replacement succeeds.
*/
to_destroy = sba->curr_reg;
bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks,
guarded_size);
if (err) {
goto label_err;
}
} else {
to_destroy = NULL;
}
assert(guarded_size <= edata_size_get(sba->curr_reg));
size_t trail_size = edata_size_get(sba->curr_reg) - guarded_size;
edata_t* edata;
if (trail_size != 0) {
edata_t* curr_reg_trail = extent_split_wrapper(tsdn, pac,
ehooks, sba->curr_reg, guarded_size, trail_size,
/* holding_core_locks */ true);
if (curr_reg_trail == NULL) {
goto label_err;
}
edata = sba->curr_reg;
sba->curr_reg = curr_reg_trail;
} else {
edata = sba->curr_reg;
sba->curr_reg = NULL;
}
malloc_mutex_unlock(tsdn, &sba->mtx);
assert(!edata_guarded_get(edata));
assert(sba->curr_reg == NULL || !edata_guarded_get(sba->curr_reg));
assert(to_destroy == NULL || !edata_guarded_get(to_destroy));
if (to_destroy != NULL) {
extent_destroy_wrapper(tsdn, pac, ehooks, to_destroy);
}
san_guard_pages(tsdn, ehooks, edata, pac->emap, /* left */ false,
/* right */ true, /* remap */ true);
if (extent_commit_zero(tsdn, ehooks, edata, /* commit */ true, zero,
/* growing_retained */ false)) {
extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
edata);
return NULL;
}
if (config_prof) {
extent_gdump_add(tsdn, edata);
}
return edata;
label_err:
malloc_mutex_unlock(tsdn, &sba->mtx);
return NULL;
}
static bool
san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
ehooks_t *ehooks, size_t size) {
malloc_mutex_assert_owner(tsdn, &sba->mtx);
bool committed = false, zeroed = false;
size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE ? size :
SBA_RETAINED_ALLOC_SIZE;
assert((alloc_size & PAGE_MASK) == 0);
sba->curr_reg = extent_alloc_wrapper(tsdn, pac, ehooks, NULL,
alloc_size, PAGE, zeroed, &committed,
/* growing_retained */ true);
if (sba->curr_reg == NULL) {
return true;
}
return false;
}

306
BeefRT/JEMalloc/src/sc.c Normal file

@ -0,0 +1,306 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/sc.h"
/*
* This module computes the size classes used to satisfy allocations. The logic
* here was ported more or less line-by-line from a shell script, and because of
* that is not the most idiomatic C. Eventually we should fix this, but for now
* at least the damage is compartmentalized to this file.
*/
size_t
reg_size_compute(int lg_base, int lg_delta, int ndelta) {
return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
}
/* Returns the number of pages in the slab. */
static int
slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) {
size_t page = (ZU(1) << lg_page);
size_t reg_size = reg_size_compute(lg_base, lg_delta, ndelta);
size_t try_slab_size = page;
size_t try_nregs = try_slab_size / reg_size;
size_t perfect_slab_size = 0;
bool perfect = false;
/*
* This loop continues until we find the least common multiple of the
* page size and size class size. Size classes are all of the form
* base + ndelta * delta == (ndelta + base/delta) * delta, which is
* (ndelta + ngroup) * delta. The way we choose slabbing strategies
* means that delta is at most the page size and ndelta < ngroup. So
* the loop executes for at most 2 * ngroup - 1 iterations, which is
* also the bound on the number of pages in a slab chosen by default.
* With the current default settings, this is at most 7.
*/
while (!perfect) {
perfect_slab_size = try_slab_size;
size_t perfect_nregs = try_nregs;
try_slab_size += page;
try_nregs = try_slab_size / reg_size;
if (perfect_slab_size == perfect_nregs * reg_size) {
perfect = true;
}
}
return (int)(perfect_slab_size / page);
}
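/*
 * Worked example, assuming 4 KiB pages: for the 1280-byte size class
 * (lg_base = 10, lg_delta = 8, ndelta = 1), the candidate slab sizes are
 * 4096, 8192, 12288, 16384 and 20480 bytes; 20480 is the first multiple of
 * the page size that is also a multiple of 1280 (16 regions), so the
 * function returns 5 pages.
 */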
static void
size_class(
/* Output. */
sc_t *sc,
/* Configuration decisions. */
int lg_max_lookup, int lg_page, int lg_ngroup,
/* Inputs specific to the size class. */
int index, int lg_base, int lg_delta, int ndelta) {
sc->index = index;
sc->lg_base = lg_base;
sc->lg_delta = lg_delta;
sc->ndelta = ndelta;
size_t size = reg_size_compute(lg_base, lg_delta, ndelta);
sc->psz = (size % (ZU(1) << lg_page) == 0);
if (index == 0) {
assert(!sc->psz);
}
if (size < (ZU(1) << (lg_page + lg_ngroup))) {
sc->bin = true;
sc->pgs = slab_size(lg_page, lg_base, lg_delta, ndelta);
} else {
sc->bin = false;
sc->pgs = 0;
}
if (size <= (ZU(1) << lg_max_lookup)) {
sc->lg_delta_lookup = lg_delta;
} else {
sc->lg_delta_lookup = 0;
}
}
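/*
 * Continuing the 1280-byte example above, under typical defaults
 * (lg_page = 12, lg_ngroup = 2, lg_max_lookup = 12): psz is false because
 * 1280 is not a multiple of the page size, bin is true because
 * 1280 < (1 << (12 + 2)), pgs is the 5-page slab computed by slab_size(),
 * and lg_delta_lookup is 8 because 1280 <= (1 << 12).
 */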
static void
size_classes(
/* Output. */
sc_data_t *sc_data,
/* Determined by the system. */
size_t lg_ptr_size, int lg_quantum,
/* Configuration decisions. */
int lg_tiny_min, int lg_max_lookup, int lg_page, int lg_ngroup) {
int ptr_bits = (1 << lg_ptr_size) * 8;
int ngroup = (1 << lg_ngroup);
int ntiny = 0;
int nlbins = 0;
int lg_tiny_maxclass = (unsigned)-1;
int nbins = 0;
int npsizes = 0;
int index = 0;
int ndelta = 0;
int lg_base = lg_tiny_min;
int lg_delta = lg_base;
/* Outputs that we update as we go. */
size_t lookup_maxclass = 0;
size_t small_maxclass = 0;
int lg_large_minclass = 0;
size_t large_maxclass = 0;
/* Tiny size classes. */
while (lg_base < lg_quantum) {
sc_t *sc = &sc_data->sc[index];
size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
lg_base, lg_delta, ndelta);
if (sc->lg_delta_lookup != 0) {
nlbins = index + 1;
}
if (sc->psz) {
npsizes++;
}
if (sc->bin) {
nbins++;
}
ntiny++;
/* Final written value is correct. */
lg_tiny_maxclass = lg_base;
index++;
lg_delta = lg_base;
lg_base++;
}
/* First non-tiny (pseudo) group. */
if (ntiny != 0) {
sc_t *sc = &sc_data->sc[index];
/*
* See the note in sc.h; the first non-tiny size class has an
* unusual encoding.
*/
lg_base--;
ndelta = 1;
size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
lg_base, lg_delta, ndelta);
index++;
lg_base++;
lg_delta++;
if (sc->psz) {
npsizes++;
}
if (sc->bin) {
nbins++;
}
}
while (ndelta < ngroup) {
sc_t *sc = &sc_data->sc[index];
size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
lg_base, lg_delta, ndelta);
index++;
ndelta++;
if (sc->psz) {
npsizes++;
}
if (sc->bin) {
nbins++;
}
}
/* All remaining groups. */
lg_base = lg_base + lg_ngroup;
while (lg_base < ptr_bits - 1) {
ndelta = 1;
int ndelta_limit;
if (lg_base == ptr_bits - 2) {
ndelta_limit = ngroup - 1;
} else {
ndelta_limit = ngroup;
}
while (ndelta <= ndelta_limit) {
sc_t *sc = &sc_data->sc[index];
size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
lg_base, lg_delta, ndelta);
if (sc->lg_delta_lookup != 0) {
nlbins = index + 1;
/* Final written value is correct. */
lookup_maxclass = (ZU(1) << lg_base)
+ (ZU(ndelta) << lg_delta);
}
if (sc->psz) {
npsizes++;
}
if (sc->bin) {
nbins++;
/* Final written value is correct. */
small_maxclass = (ZU(1) << lg_base)
+ (ZU(ndelta) << lg_delta);
if (lg_ngroup > 0) {
lg_large_minclass = lg_base + 1;
} else {
lg_large_minclass = lg_base + 2;
}
}
large_maxclass = (ZU(1) << lg_base)
+ (ZU(ndelta) << lg_delta);
index++;
ndelta++;
}
lg_base++;
lg_delta++;
}
/* Additional outputs. */
int nsizes = index;
unsigned lg_ceil_nsizes = lg_ceil(nsizes);
/* Fill in the output data. */
sc_data->ntiny = ntiny;
sc_data->nlbins = nlbins;
sc_data->nbins = nbins;
sc_data->nsizes = nsizes;
sc_data->lg_ceil_nsizes = lg_ceil_nsizes;
sc_data->npsizes = npsizes;
sc_data->lg_tiny_maxclass = lg_tiny_maxclass;
sc_data->lookup_maxclass = lookup_maxclass;
sc_data->small_maxclass = small_maxclass;
sc_data->lg_large_minclass = lg_large_minclass;
sc_data->large_minclass = (ZU(1) << lg_large_minclass);
sc_data->large_maxclass = large_maxclass;
/*
* We compute these values in two ways:
* - Incrementally, as above.
* - In macros, in sc.h.
* The computation is easier when done incrementally, but putting it in
* a constant makes it available to the fast paths without having to
* touch the extra global cacheline. We assert, however, that the two
* computations are equivalent.
*/
assert(sc_data->npsizes == SC_NPSIZES);
assert(sc_data->lg_tiny_maxclass == SC_LG_TINY_MAXCLASS);
assert(sc_data->small_maxclass == SC_SMALL_MAXCLASS);
assert(sc_data->large_minclass == SC_LARGE_MINCLASS);
assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS);
assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS);
/*
* In the allocation fastpath, we want to assume that we can
* unconditionally subtract the requested allocation size from
* a ssize_t, and detect passing through 0 correctly. This
* results in optimal generated code. For this to work, the
* maximum allocation size must be less than SSIZE_MAX.
*/
assert(SC_LARGE_MAXCLASS < SSIZE_MAX);
}
void
sc_data_init(sc_data_t *sc_data) {
size_classes(sc_data, LG_SIZEOF_PTR, LG_QUANTUM, SC_LG_TINY_MIN,
SC_LG_MAX_LOOKUP, LG_PAGE, SC_LG_NGROUP);
sc_data->initialized = true;
}
static void
sc_data_update_sc_slab_size(sc_t *sc, size_t reg_size, size_t pgs_guess) {
size_t min_pgs = reg_size / PAGE;
if (reg_size % PAGE != 0) {
min_pgs++;
}
/*
* BITMAP_MAXBITS is actually determined by putting the smallest
* possible size-class on one page, so this can never be 0.
*/
size_t max_pgs = BITMAP_MAXBITS * reg_size / PAGE;
assert(min_pgs <= max_pgs);
assert(min_pgs > 0);
assert(max_pgs >= 1);
if (pgs_guess < min_pgs) {
sc->pgs = (int)min_pgs;
} else if (pgs_guess > max_pgs) {
sc->pgs = (int)max_pgs;
} else {
sc->pgs = (int)pgs_guess;
}
}
void
sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) {
assert(data->initialized);
for (int i = 0; i < data->nsizes; i++) {
sc_t *sc = &data->sc[i];
if (!sc->bin) {
break;
}
size_t reg_size = reg_size_compute(sc->lg_base, sc->lg_delta,
sc->ndelta);
if (begin <= reg_size && reg_size <= end) {
sc_data_update_sc_slab_size(sc, reg_size, pgs);
}
}
}
void
sc_boot(sc_data_t *data) {
sc_data_init(data);
}

422
BeefRT/JEMalloc/src/sec.c Normal file
View file

@ -0,0 +1,422 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/sec.h"
static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
size_t alignment, bool zero, bool guarded, bool frequent_reuse,
bool *deferred_work_generated);
static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated);
static void sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated);
static void
sec_bin_init(sec_bin_t *bin) {
bin->being_batch_filled = false;
bin->bytes_cur = 0;
edata_list_active_init(&bin->freelist);
}
bool
sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
const sec_opts_t *opts) {
assert(opts->max_alloc >= PAGE);
size_t max_alloc = PAGE_FLOOR(opts->max_alloc);
pszind_t npsizes = sz_psz2ind(max_alloc) + 1;
size_t sz_shards = opts->nshards * sizeof(sec_shard_t);
size_t sz_bins = opts->nshards * (size_t)npsizes * sizeof(sec_bin_t);
size_t sz_alloc = sz_shards + sz_bins;
void *dynalloc = base_alloc(tsdn, base, sz_alloc, CACHELINE);
if (dynalloc == NULL) {
return true;
}
sec_shard_t *shard_cur = (sec_shard_t *)dynalloc;
sec->shards = shard_cur;
sec_bin_t *bin_cur = (sec_bin_t *)&shard_cur[opts->nshards];
/* Just for asserts, below. */
sec_bin_t *bin_start = bin_cur;
for (size_t i = 0; i < opts->nshards; i++) {
sec_shard_t *shard = shard_cur;
shard_cur++;
bool err = malloc_mutex_init(&shard->mtx, "sec_shard",
WITNESS_RANK_SEC_SHARD, malloc_mutex_rank_exclusive);
if (err) {
return true;
}
shard->enabled = true;
shard->bins = bin_cur;
for (pszind_t j = 0; j < npsizes; j++) {
sec_bin_init(&shard->bins[j]);
bin_cur++;
}
shard->bytes_cur = 0;
shard->to_flush_next = 0;
}
/*
* Should have exactly matched the bin_start to the first unused byte
* after the shards.
*/
assert((void *)shard_cur == (void *)bin_start);
/* And the last bin to use up the last bytes of the allocation. */
assert((char *)bin_cur == ((char *)dynalloc + sz_alloc));
sec->fallback = fallback;
sec->opts = *opts;
sec->npsizes = npsizes;
/*
* Initialize these last so that an improper use of an SEC whose
* initialization failed will segfault in an easy-to-spot way.
*/
sec->pai.alloc = &sec_alloc;
sec->pai.alloc_batch = &pai_alloc_batch_default;
sec->pai.expand = &sec_expand;
sec->pai.shrink = &sec_shrink;
sec->pai.dalloc = &sec_dalloc;
sec->pai.dalloc_batch = &pai_dalloc_batch_default;
return false;
}
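/*
 * Editor's illustrative sketch (not part of the upstream source) of the
 * layout sec_init() carves out of its single base allocation: nshards
 * sec_shard_t structs followed immediately by nshards * npsizes sec_bin_t
 * structs, with shard i owning bins [i * npsizes, (i + 1) * npsizes).
 */
static size_t
sec_layout_bytes_example(size_t nshards, size_t npsizes) {
	return nshards * sizeof(sec_shard_t)
	    + nshards * npsizes * sizeof(sec_bin_t);
}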
static sec_shard_t *
sec_shard_pick(tsdn_t *tsdn, sec_t *sec) {
/*
* Eventually, we should implement affinity, tracking source shard using
* the edata_t's newly freed up fields. For now, just randomly
* distribute across all shards.
*/
if (tsdn_null(tsdn)) {
return &sec->shards[0];
}
tsd_t *tsd = tsdn_tsd(tsdn);
uint8_t *idxp = tsd_sec_shardp_get(tsd);
if (*idxp == (uint8_t)-1) {
/*
* First use; initialize using the trick from Daniel Lemire's
* "A fast alternative to the modulo reduction. Use a 64 bit
* number to store 32 bits, since we'll deliberately overflow
* when we multiply by the number of shards.
*/
uint64_t rand32 = prng_lg_range_u64(tsd_prng_statep_get(tsd), 32);
uint32_t idx =
(uint32_t)((rand32 * (uint64_t)sec->opts.nshards) >> 32);
assert(idx < (uint32_t)sec->opts.nshards);
*idxp = (uint8_t)idx;
}
return &sec->shards[*idxp];
}
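/*
 * Editor's illustrative sketch (not part of the upstream source) of the
 * multiply-shift reduction used above: a uniform 32-bit value r maps to
 * (r * n) >> 32, which lands (nearly) uniformly in [0, n) without a
 * division. E.g. with n == 8, r == 0x80000000 maps to shard 4.
 */
static uint32_t
lemire_reduce_example(uint32_t r, uint32_t n) {
	return (uint32_t)(((uint64_t)r * (uint64_t)n) >> 32);
}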
/*
* Perhaps surprisingly, this can be called on the alloc pathways; if we hit an
* empty cache, we'll try to fill it, which can push the shard over its limit.
*/
static void
sec_flush_some_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
edata_list_active_t to_flush;
edata_list_active_init(&to_flush);
while (shard->bytes_cur > sec->opts.bytes_after_flush) {
/* Pick a victim. */
sec_bin_t *bin = &shard->bins[shard->to_flush_next];
/* Update our victim-picking state. */
shard->to_flush_next++;
if (shard->to_flush_next == sec->npsizes) {
shard->to_flush_next = 0;
}
assert(shard->bytes_cur >= bin->bytes_cur);
if (bin->bytes_cur != 0) {
shard->bytes_cur -= bin->bytes_cur;
bin->bytes_cur = 0;
edata_list_active_concat(&to_flush, &bin->freelist);
}
/*
* Either bin->bytes_cur was 0, in which case we didn't touch
* the bin list but it should be empty anyways (or else we
* missed a bytes_cur update on a list modification), or it
* *wasn't* 0 and we emptied it ourselves. Either way, it should
* be empty now.
*/
assert(edata_list_active_empty(&bin->freelist));
}
malloc_mutex_unlock(tsdn, &shard->mtx);
bool deferred_work_generated = false;
pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
&deferred_work_generated);
}
static edata_t *
sec_shard_alloc_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
sec_bin_t *bin) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
if (!shard->enabled) {
return NULL;
}
edata_t *edata = edata_list_active_first(&bin->freelist);
if (edata != NULL) {
edata_list_active_remove(&bin->freelist, edata);
assert(edata_size_get(edata) <= bin->bytes_cur);
bin->bytes_cur -= edata_size_get(edata);
assert(edata_size_get(edata) <= shard->bytes_cur);
shard->bytes_cur -= edata_size_get(edata);
}
return edata;
}
static edata_t *
sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
sec_bin_t *bin, size_t size) {
malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
edata_list_active_t result;
edata_list_active_init(&result);
bool deferred_work_generated = false;
size_t nalloc = pai_alloc_batch(tsdn, sec->fallback, size,
1 + sec->opts.batch_fill_extra, &result, &deferred_work_generated);
edata_t *ret = edata_list_active_first(&result);
if (ret != NULL) {
edata_list_active_remove(&result, ret);
}
malloc_mutex_lock(tsdn, &shard->mtx);
bin->being_batch_filled = false;
/*
* Handle the easy case first: nothing to cache. Note that this can
* only happen in case of OOM, since sec_alloc checks the expected
* number of allocs, and doesn't bother going down the batch_fill
* pathway if there won't be anything left to cache. So to be in this
* code path, we must have asked for > 1 alloc, but only gotten 1 back.
*/
if (nalloc <= 1) {
malloc_mutex_unlock(tsdn, &shard->mtx);
return ret;
}
size_t new_cached_bytes = (nalloc - 1) * size;
edata_list_active_concat(&bin->freelist, &result);
bin->bytes_cur += new_cached_bytes;
shard->bytes_cur += new_cached_bytes;
if (shard->bytes_cur > sec->opts.max_bytes) {
sec_flush_some_and_unlock(tsdn, sec, shard);
} else {
malloc_mutex_unlock(tsdn, &shard->mtx);
}
return ret;
}
static edata_t *
sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
assert((size & PAGE_MASK) == 0);
assert(!guarded);
sec_t *sec = (sec_t *)self;
if (zero || alignment > PAGE || sec->opts.nshards == 0
|| size > sec->opts.max_alloc) {
return pai_alloc(tsdn, sec->fallback, size, alignment, zero,
/* guarded */ false, frequent_reuse,
deferred_work_generated);
}
pszind_t pszind = sz_psz2ind(size);
assert(pszind < sec->npsizes);
sec_shard_t *shard = sec_shard_pick(tsdn, sec);
sec_bin_t *bin = &shard->bins[pszind];
bool do_batch_fill = false;
malloc_mutex_lock(tsdn, &shard->mtx);
edata_t *edata = sec_shard_alloc_locked(tsdn, sec, shard, bin);
if (edata == NULL) {
if (!bin->being_batch_filled
&& sec->opts.batch_fill_extra > 0) {
bin->being_batch_filled = true;
do_batch_fill = true;
}
}
malloc_mutex_unlock(tsdn, &shard->mtx);
if (edata == NULL) {
if (do_batch_fill) {
edata = sec_batch_fill_and_alloc(tsdn, sec, shard, bin,
size);
} else {
edata = pai_alloc(tsdn, sec->fallback, size, alignment,
zero, /* guarded */ false, frequent_reuse,
deferred_work_generated);
}
}
return edata;
}
static bool
sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
size_t new_size, bool zero, bool *deferred_work_generated) {
sec_t *sec = (sec_t *)self;
return pai_expand(tsdn, sec->fallback, edata, old_size, new_size, zero,
deferred_work_generated);
}
static bool
sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
size_t new_size, bool *deferred_work_generated) {
sec_t *sec = (sec_t *)self;
return pai_shrink(tsdn, sec->fallback, edata, old_size, new_size,
deferred_work_generated);
}
static void
sec_flush_all_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
shard->bytes_cur = 0;
edata_list_active_t to_flush;
edata_list_active_init(&to_flush);
for (pszind_t i = 0; i < sec->npsizes; i++) {
sec_bin_t *bin = &shard->bins[i];
bin->bytes_cur = 0;
edata_list_active_concat(&to_flush, &bin->freelist);
}
/*
* Ordinarily we would try to avoid doing the batch deallocation while
* holding the shard mutex, but the flush_all pathways only happen when
* we're disabling the HPA or resetting the arena, both of which are
* rare pathways.
*/
bool deferred_work_generated = false;
pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
&deferred_work_generated);
}
static void
sec_shard_dalloc_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
assert(shard->bytes_cur <= sec->opts.max_bytes);
size_t size = edata_size_get(edata);
pszind_t pszind = sz_psz2ind(size);
assert(pszind < sec->npsizes);
/*
* Prepending here results in LIFO allocation per bin, which seems
* reasonable.
*/
sec_bin_t *bin = &shard->bins[pszind];
edata_list_active_prepend(&bin->freelist, edata);
bin->bytes_cur += size;
shard->bytes_cur += size;
if (shard->bytes_cur > sec->opts.max_bytes) {
/*
* We've exceeded the shard limit. We make two nods in the
* direction of fragmentation avoidance: we flush everything in
* the shard, rather than one particular bin, and we hold the
* lock while flushing (in case one of the extents we flush is
* highly preferred from a fragmentation-avoidance perspective
* in the backing allocator). This has the extra advantage of
* not requiring advanced cache balancing strategies.
*/
sec_flush_some_and_unlock(tsdn, sec, shard);
malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
} else {
malloc_mutex_unlock(tsdn, &shard->mtx);
}
}
static void
sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated) {
sec_t *sec = (sec_t *)self;
if (sec->opts.nshards == 0
|| edata_size_get(edata) > sec->opts.max_alloc) {
pai_dalloc(tsdn, sec->fallback, edata,
deferred_work_generated);
return;
}
sec_shard_t *shard = sec_shard_pick(tsdn, sec);
malloc_mutex_lock(tsdn, &shard->mtx);
if (shard->enabled) {
sec_shard_dalloc_and_unlock(tsdn, sec, shard, edata);
} else {
malloc_mutex_unlock(tsdn, &shard->mtx);
pai_dalloc(tsdn, sec->fallback, edata,
deferred_work_generated);
}
}
void
sec_flush(tsdn_t *tsdn, sec_t *sec) {
for (size_t i = 0; i < sec->opts.nshards; i++) {
malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
sec_flush_all_locked(tsdn, sec, &sec->shards[i]);
malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
}
}
void
sec_disable(tsdn_t *tsdn, sec_t *sec) {
for (size_t i = 0; i < sec->opts.nshards; i++) {
malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
sec->shards[i].enabled = false;
sec_flush_all_locked(tsdn, sec, &sec->shards[i]);
malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
}
}
void
sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats) {
size_t sum = 0;
for (size_t i = 0; i < sec->opts.nshards; i++) {
/*
* We could save these lock acquisitions by making bytes_cur
* atomic, but stats collection is rare anyways and we expect
* the number and type of stats to get more interesting.
*/
malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
sum += sec->shards[i].bytes_cur;
malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
}
stats->bytes += sum;
}
void
sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
mutex_prof_data_t *mutex_prof_data) {
for (size_t i = 0; i < sec->opts.nshards; i++) {
malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
malloc_mutex_prof_accum(tsdn, mutex_prof_data,
&sec->shards[i].mtx);
malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
}
}
void
sec_prefork2(tsdn_t *tsdn, sec_t *sec) {
for (size_t i = 0; i < sec->opts.nshards; i++) {
malloc_mutex_prefork(tsdn, &sec->shards[i].mtx);
}
}
void
sec_postfork_parent(tsdn_t *tsdn, sec_t *sec) {
for (size_t i = 0; i < sec->opts.nshards; i++) {
malloc_mutex_postfork_parent(tsdn, &sec->shards[i].mtx);
}
}
void
sec_postfork_child(tsdn_t *tsdn, sec_t *sec) {
for (size_t i = 0; i < sec->opts.nshards; i++) {
malloc_mutex_postfork_child(tsdn, &sec->shards[i].mtx);
}
}

1973
BeefRT/JEMalloc/src/stats.c Normal file

File diff suppressed because it is too large Load diff

114
BeefRT/JEMalloc/src/sz.c Normal file
View file

@ -0,0 +1,114 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/sz.h"
JEMALLOC_ALIGNED(CACHELINE)
size_t sz_pind2sz_tab[SC_NPSIZES+1];
size_t sz_large_pad;
size_t
sz_psz_quantize_floor(size_t size) {
size_t ret;
pszind_t pind;
assert(size > 0);
assert((size & PAGE_MASK) == 0);
pind = sz_psz2ind(size - sz_large_pad + 1);
if (pind == 0) {
/*
* Avoid underflow. This short-circuit would also do the right
* thing for all sizes in the range for which there are
* PAGE-spaced size classes, but it's simplest to just handle
* the one case that would cause erroneous results.
*/
return size;
}
ret = sz_pind2sz(pind - 1) + sz_large_pad;
assert(ret <= size);
return ret;
}
size_t
sz_psz_quantize_ceil(size_t size) {
size_t ret;
assert(size > 0);
assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
ret = sz_psz_quantize_floor(size);
if (ret < size) {
/*
* Skip a quantization that may have an adequately large extent,
* because under-sized extents may be mixed in. This only
* happens when an unusual size is requested, i.e. for aligned
* allocation, and is just one of several places where linear
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
sz_large_pad;
}
return ret;
}
static void
sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
int pind = 0;
for (unsigned i = 0; i < SC_NSIZES; i++) {
const sc_t *sc = &sc_data->sc[i];
if (sc->psz) {
sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << sc->lg_delta);
pind++;
}
}
for (int i = pind; i <= (int)SC_NPSIZES; i++) {
sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE;
}
}
JEMALLOC_ALIGNED(CACHELINE)
size_t sz_index2size_tab[SC_NSIZES];
static void
sz_boot_index2size_tab(const sc_data_t *sc_data) {
for (unsigned i = 0; i < SC_NSIZES; i++) {
const sc_t *sc = &sc_data->sc[i];
sz_index2size_tab[i] = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << (sc->lg_delta));
}
}
/*
* To keep this table small, we divide sizes by the tiny min size, which gives
* the smallest interval for which the result can change.
*/
JEMALLOC_ALIGNED(CACHELINE)
uint8_t sz_size2index_tab[(SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1];
static void
sz_boot_size2index_tab(const sc_data_t *sc_data) {
size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1;
size_t dst_ind = 0;
for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max;
sc_ind++) {
const sc_t *sc = &sc_data->sc[sc_ind];
size_t sz = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << sc->lg_delta);
size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1)
>> SC_LG_TINY_MIN);
for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) {
sz_size2index_tab[dst_ind] = sc_ind;
}
}
}
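/*
 * Editor's illustrative sketch (not necessarily the exact lookup path used
 * elsewhere in jemalloc) of how a table built this way is consumed: round
 * the request up to a multiple of the tiny minimum and use the quotient as
 * the slot, so every size that rounds to the same multiple shares an entry.
 */
static unsigned
sz_size2index_lookup_example(size_t size) {
	/* Assumes 0 < size <= SC_LOOKUP_MAXCLASS. */
	size_t slot = (size + (ZU(1) << SC_LG_TINY_MIN) - 1) >> SC_LG_TINY_MIN;
	return sz_size2index_tab[slot];
}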
void
sz_boot(const sc_data_t *sc_data, bool cache_oblivious) {
sz_large_pad = cache_oblivious ? PAGE : 0;
sz_boot_pind2sz_tab(sc_data);
sz_boot_index2size_tab(sc_data);
sz_boot_size2index_tab(sc_data);
}

1101
BeefRT/JEMalloc/src/tcache.c Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,12 @@
#include "jemalloc/internal/jemalloc_preamble.h"
/*
* The hooks are a little bit screwy -- they're not genuinely exported in the
* sense that we want them available to end-users, but we do want them visible
* from outside the generated library, so that we can use them in test code.
*/
JEMALLOC_EXPORT
void (*test_hooks_arena_new_hook)() = NULL;
JEMALLOC_EXPORT
void (*test_hooks_libc_hook)() = NULL;

View file

@ -0,0 +1,343 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/thread_event.h"
/*
* Signatures for event specific functions. These functions should be defined
* by the modules owning each event. The signatures here verify that the
* definitions follow the right format.
*
* The first two are functions computing new / postponed event wait time. New
* event wait time is the time till the next event if an event is currently
* being triggered; postponed event wait time is the time till the next event
* if an event should be triggered but needs to be postponed, e.g. when the TSD
* is not nominal or during reentrancy.
*
* The third is the event handler function, which is called whenever an event
* is triggered. The parameter is the elapsed time since the last time an
* event of the same type was triggered.
*/
#define E(event, condition_unused, is_alloc_event_unused) \
uint64_t event##_new_event_wait(tsd_t *tsd); \
uint64_t event##_postponed_event_wait(tsd_t *tsd); \
void event##_event_handler(tsd_t *tsd, uint64_t elapsed);
ITERATE_OVER_ALL_EVENTS
#undef E
/* Signatures for internal functions fetching elapsed time. */
#define E(event, condition_unused, is_alloc_event_unused) \
static uint64_t event##_fetch_elapsed(tsd_t *tsd);
ITERATE_OVER_ALL_EVENTS
#undef E
static uint64_t
tcache_gc_fetch_elapsed(tsd_t *tsd) {
return TE_INVALID_ELAPSED;
}
static uint64_t
tcache_gc_dalloc_fetch_elapsed(tsd_t *tsd) {
return TE_INVALID_ELAPSED;
}
static uint64_t
prof_sample_fetch_elapsed(tsd_t *tsd) {
uint64_t last_event = thread_allocated_last_event_get(tsd);
uint64_t last_sample_event = prof_sample_last_event_get(tsd);
prof_sample_last_event_set(tsd, last_event);
return last_event - last_sample_event;
}
static uint64_t
stats_interval_fetch_elapsed(tsd_t *tsd) {
uint64_t last_event = thread_allocated_last_event_get(tsd);
uint64_t last_stats_event = stats_interval_last_event_get(tsd);
stats_interval_last_event_set(tsd, last_event);
return last_event - last_stats_event;
}
static uint64_t
peak_alloc_fetch_elapsed(tsd_t *tsd) {
return TE_INVALID_ELAPSED;
}
static uint64_t
peak_dalloc_fetch_elapsed(tsd_t *tsd) {
return TE_INVALID_ELAPSED;
}
/* Per event facilities done. */
static bool
te_ctx_has_active_events(te_ctx_t *ctx) {
assert(config_debug);
#define E(event, condition, alloc_event) \
if (condition && alloc_event == ctx->is_alloc) { \
return true; \
}
ITERATE_OVER_ALL_EVENTS
#undef E
return false;
}
static uint64_t
te_next_event_compute(tsd_t *tsd, bool is_alloc) {
uint64_t wait = TE_MAX_START_WAIT;
#define E(event, condition, alloc_event) \
if (is_alloc == alloc_event && condition) { \
uint64_t event_wait = \
event##_event_wait_get(tsd); \
assert(event_wait <= TE_MAX_START_WAIT); \
if (event_wait > 0U && event_wait < wait) { \
wait = event_wait; \
} \
}
ITERATE_OVER_ALL_EVENTS
#undef E
assert(wait <= TE_MAX_START_WAIT);
return wait;
}
static void
te_assert_invariants_impl(tsd_t *tsd, te_ctx_t *ctx) {
uint64_t current_bytes = te_ctx_current_bytes_get(ctx);
uint64_t last_event = te_ctx_last_event_get(ctx);
uint64_t next_event = te_ctx_next_event_get(ctx);
uint64_t next_event_fast = te_ctx_next_event_fast_get(ctx);
assert(last_event != next_event);
if (next_event > TE_NEXT_EVENT_FAST_MAX || !tsd_fast(tsd)) {
assert(next_event_fast == 0U);
} else {
assert(next_event_fast == next_event);
}
/* The subtraction is intentionally susceptible to underflow. */
uint64_t interval = next_event - last_event;
/* The subtraction is intentionally susceptible to underflow. */
assert(current_bytes - last_event < interval);
uint64_t min_wait = te_next_event_compute(tsd, te_ctx_is_alloc(ctx));
/*
* next_event should only have been pushed up, except when no event is
* on and the TSD is just initialized. The last_event == 0U guard
* below is stronger than needed, but having an exactly accurate guard
* is more complicated to implement.
*/
assert((!te_ctx_has_active_events(ctx) && last_event == 0U) ||
interval == min_wait ||
(interval < min_wait && interval == TE_MAX_INTERVAL));
}
void
te_assert_invariants_debug(tsd_t *tsd) {
te_ctx_t ctx;
te_ctx_get(tsd, &ctx, true);
te_assert_invariants_impl(tsd, &ctx);
te_ctx_get(tsd, &ctx, false);
te_assert_invariants_impl(tsd, &ctx);
}
/*
* Synchronization around the fast threshold in tsd --
* There are two threads to consider in the synchronization here:
* - The owner of the tsd being updated by a slow path change
* - The remote thread, doing that slow path change.
*
* As a design constraint, we want to ensure that a slow-path transition cannot
* be ignored for arbitrarily long, and that if the remote thread causes a
* slow-path transition and then communicates with the owner thread that it has
* occurred, then the owner will go down the slow path on the next allocator
* operation (i.e. we don't want to just wait until the owner hits its slow
* path reset condition on its own).
*
* Here's our strategy to do that:
*
* The remote thread will update the slow-path stores to TSD variables, issue a
* SEQ_CST fence, and then update the TSD next_event_fast counter. The owner
* thread will update next_event_fast, issue an SEQ_CST fence, and then check
* its TSD to see if it's on the slow path.
* This is fairly straightforward when 64-bit atomics are supported. Assume that
* the remote fence is sandwiched between two owner fences in the reset pathway.
* The case where there is no preceding or trailing owner fence (i.e. because
* the owner thread is near the beginning or end of its life) can be analyzed
* similarly. The owner store to next_event_fast preceding the earlier owner
* fence will be earlier in coherence order than the remote store to it, so that
* the owner thread will go down the slow path once the store becomes visible to
* it, which is no later than the time of the second fence.
* The case where we don't support 64-bit atomics is trickier, since word
* tearing is possible. We'll repeat the same analysis, and look at the two
* owner fences sandwiching the remote fence. The next_event_fast stores done
* alongside the earlier owner fence cannot overwrite any of the remote stores
* (since they precede the earlier owner fence in sb, which precedes the remote
* fence in sc, which precedes the remote stores in sb). After the second owner
* fence there will be a re-check of the slow-path variables anyways, so the
* "owner will notice that it's on the slow path eventually" guarantee is
* satisfied. To make sure that the out-of-band-messaging constraint is as well,
* note that either the message passing is sequenced before the second owner
* fence (in which case the remote stores happen before the second set of owner
* stores, so malloc sees a value of zero for next_event_fast and goes down the
* slow path), or it is not (in which case the owner sees the tsd slow-path
* writes on its previous update). This leaves open the possibility that the
* remote thread will (at some arbitrary point in the future) zero out one half
* of the owner thread's next_event_fast, but that's always safe (it just sends
* it down the slow path earlier).
*/
static void
te_ctx_next_event_fast_update(te_ctx_t *ctx) {
uint64_t next_event = te_ctx_next_event_get(ctx);
uint64_t next_event_fast = (next_event <= TE_NEXT_EVENT_FAST_MAX) ?
next_event : 0U;
te_ctx_next_event_fast_set(ctx, next_event_fast);
}
void
te_recompute_fast_threshold(tsd_t *tsd) {
if (tsd_state_get(tsd) != tsd_state_nominal) {
/* Check first because this is also called on purgatory. */
te_next_event_fast_set_non_nominal(tsd);
return;
}
te_ctx_t ctx;
te_ctx_get(tsd, &ctx, true);
te_ctx_next_event_fast_update(&ctx);
te_ctx_get(tsd, &ctx, false);
te_ctx_next_event_fast_update(&ctx);
atomic_fence(ATOMIC_SEQ_CST);
if (tsd_state_get(tsd) != tsd_state_nominal) {
te_next_event_fast_set_non_nominal(tsd);
}
}
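/*
 * Editor's illustrative sketch (not upstream code) of the store/fence/store
 * vs. store/fence/load pattern described in the synchronization comment
 * above, using jemalloc's atomic wrappers. demo_slow_flag and
 * demo_next_event_fast are hypothetical stand-ins for the real TSD fields.
 */
static atomic_b_t demo_slow_flag = ATOMIC_INIT(false);
static atomic_u32_t demo_next_event_fast = ATOMIC_INIT(0);

static void
demo_remote_slow_transition(void) {
	atomic_store_b(&demo_slow_flag, true, ATOMIC_RELAXED);
	atomic_fence(ATOMIC_SEQ_CST);
	/* Force the owner down its slow path on its next event check. */
	atomic_store_u32(&demo_next_event_fast, 0, ATOMIC_RELAXED);
}

static void
demo_owner_recompute(uint32_t fast_threshold) {
	atomic_store_u32(&demo_next_event_fast, fast_threshold, ATOMIC_RELAXED);
	atomic_fence(ATOMIC_SEQ_CST);
	if (atomic_load_b(&demo_slow_flag, ATOMIC_RELAXED)) {
		/* Re-zero so the fast path stays disabled. */
		atomic_store_u32(&demo_next_event_fast, 0, ATOMIC_RELAXED);
	}
}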
static void
te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx,
uint64_t wait) {
/*
* The next threshold based on future events can only be adjusted after
* progressing the last_event counter (which is set to current).
*/
assert(te_ctx_current_bytes_get(ctx) == te_ctx_last_event_get(ctx));
assert(wait <= TE_MAX_START_WAIT);
uint64_t next_event = te_ctx_last_event_get(ctx) + (wait <=
TE_MAX_INTERVAL ? wait : TE_MAX_INTERVAL);
te_ctx_next_event_set(tsd, ctx, next_event);
}
static uint64_t
te_clip_event_wait(uint64_t event_wait) {
assert(event_wait > 0U);
if (TE_MIN_START_WAIT > 1U &&
unlikely(event_wait < TE_MIN_START_WAIT)) {
event_wait = TE_MIN_START_WAIT;
}
if (TE_MAX_START_WAIT < UINT64_MAX &&
unlikely(event_wait > TE_MAX_START_WAIT)) {
event_wait = TE_MAX_START_WAIT;
}
return event_wait;
}
void
te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) {
/* usize has already been added to thread_allocated. */
uint64_t bytes_after = te_ctx_current_bytes_get(ctx);
/* The subtraction is intentionally susceptible to underflow. */
uint64_t accumbytes = bytes_after - te_ctx_last_event_get(ctx);
te_ctx_last_event_set(ctx, bytes_after);
bool allow_event_trigger = tsd_nominal(tsd) &&
tsd_reentrancy_level_get(tsd) == 0;
bool is_alloc = ctx->is_alloc;
uint64_t wait = TE_MAX_START_WAIT;
#define E(event, condition, alloc_event) \
bool is_##event##_triggered = false; \
if (is_alloc == alloc_event && condition) { \
uint64_t event_wait = event##_event_wait_get(tsd); \
assert(event_wait <= TE_MAX_START_WAIT); \
if (event_wait > accumbytes) { \
event_wait -= accumbytes; \
} else if (!allow_event_trigger) { \
event_wait = event##_postponed_event_wait(tsd); \
} else { \
is_##event##_triggered = true; \
event_wait = event##_new_event_wait(tsd); \
} \
event_wait = te_clip_event_wait(event_wait); \
event##_event_wait_set(tsd, event_wait); \
if (event_wait < wait) { \
wait = event_wait; \
} \
}
ITERATE_OVER_ALL_EVENTS
#undef E
assert(wait <= TE_MAX_START_WAIT);
te_adjust_thresholds_helper(tsd, ctx, wait);
te_assert_invariants(tsd);
#define E(event, condition, alloc_event) \
if (is_alloc == alloc_event && condition && \
is_##event##_triggered) { \
assert(allow_event_trigger); \
uint64_t elapsed = event##_fetch_elapsed(tsd); \
event##_event_handler(tsd, elapsed); \
}
ITERATE_OVER_ALL_EVENTS
#undef E
te_assert_invariants(tsd);
}
static void
te_init(tsd_t *tsd, bool is_alloc) {
te_ctx_t ctx;
te_ctx_get(tsd, &ctx, is_alloc);
/*
* Reset the last event to current, which starts the events from a clean
* state. This is necessary when re-initializing the tsd event counters.
*
* The event counters maintain a relationship with the current bytes:
* last_event <= current < next_event. When a reinit happens (e.g.
* reincarnated tsd), the last event needs progressing because all
* events start fresh from the current bytes.
*/
te_ctx_last_event_set(&ctx, te_ctx_current_bytes_get(&ctx));
uint64_t wait = TE_MAX_START_WAIT;
#define E(event, condition, alloc_event) \
if (is_alloc == alloc_event && condition) { \
uint64_t event_wait = event##_new_event_wait(tsd); \
event_wait = te_clip_event_wait(event_wait); \
event##_event_wait_set(tsd, event_wait); \
if (event_wait < wait) { \
wait = event_wait; \
} \
}
ITERATE_OVER_ALL_EVENTS
#undef E
te_adjust_thresholds_helper(tsd, &ctx, wait);
}
void
tsd_te_init(tsd_t *tsd) {
/* Make sure no overflow for the bytes accumulated on event_trigger. */
assert(TE_MAX_INTERVAL <= UINT64_MAX - SC_LARGE_MAXCLASS + 1);
te_init(tsd, true);
te_init(tsd, false);
te_assert_invariants(tsd);
}

View file

@ -0,0 +1,32 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
/*
* To avoid using floating point math down core paths (still necessary because
* versions of the glibc dynamic loader that did not preserve xmm registers are
* still somewhat common, requiring us to be compilable with -mno-sse), and also
* to avoid generally expensive library calls, we use a precomputed table of
* values. We want to sample U uniformly on [0, 1], and then compute
* ceil(log(u)/log(1-1/nticks)). We're mostly interested in the case where
* nticks is reasonably big, so 1/log(1-1/nticks) is well-approximated by
* -nticks.
*
* To compute log(u), we sample an integer in [1, 64] and divide, then just look
* up results in a table. As a space-compression mechanism, we store these as
* uint8_t by dividing the range (255) by the highest-magnitude value the log
* can take on, and using that as a multiplier. We then have to divide by that
* multiplier at the end of the computation.
*
* The values here are computed in src/ticker.py
*/
const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS] = {
254, 211, 187, 169, 156, 144, 135, 127,
120, 113, 107, 102, 97, 93, 89, 85,
81, 77, 74, 71, 68, 65, 62, 60,
57, 55, 53, 50, 48, 46, 44, 42,
40, 39, 37, 35, 33, 32, 30, 29,
27, 26, 24, 23, 21, 20, 19, 18,
16, 15, 14, 13, 12, 10, 9, 8,
7, 6, 5, 4, 3, 2, 1, 0
};
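/*
 * Editor's illustrative sketch (not the actual consumer, which lives in the
 * ticker header): turning one table entry into an approximately geometric
 * tick count. The entries above are round(-61 * log(i/64)) for i in
 * [1, 64] (61 being the multiplier printed by src/ticker.py), so dividing
 * by 61 and scaling by nticks recovers roughly -nticks * log(u).
 */
static uint32_t
ticker_geom_sample_example(uint32_t nticks, uint64_t rand_bits) {
	uint64_t slot = rand_bits & ((ZU(1) << TICKER_GEOM_NBITS) - 1);
	uint8_t logval = ticker_geom_table[slot];
	return (uint32_t)(((uint64_t)nticks * (uint64_t)logval) / 61);
}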

View file

@ -0,0 +1,15 @@
#!/usr/bin/env python3
import math
# Must match TICKER_GEOM_NBITS
lg_table_size = 6
table_size = 2**lg_table_size
byte_max = 255
mul = math.floor(-byte_max/math.log(1 / table_size))
values = [round(-mul * math.log(i / table_size))
for i in range(1, table_size+1)]
print("mul =", mul)
print("values:")
for i in range(table_size // 8):
print(", ".join((str(x) for x in values[i*8 : i*8 + 8])))

549
BeefRT/JEMalloc/src/tsd.c Normal file
View file

@ -0,0 +1,549 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
/******************************************************************************/
/* Data. */
/* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
JEMALLOC_TSD_TYPE_ATTR(bool) JEMALLOC_TLS_MODEL tsd_initialized = false;
bool tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
pthread_key_t tsd_tsd;
bool tsd_booted = false;
#elif (defined(_WIN32))
DWORD tsd_tsd;
tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
bool tsd_booted = false;
#else
/*
* This contains a mutex, but it's pretty convenient to allow the mutex code to
* have a dependency on tsd. So we define the struct here, and only refer to it
* by pointer in the header.
*/
struct tsd_init_head_s {
ql_head(tsd_init_block_t) blocks;
malloc_mutex_t lock;
};
pthread_key_t tsd_tsd;
tsd_init_head_t tsd_init_head = {
ql_head_initializer(blocks),
MALLOC_MUTEX_INITIALIZER
};
tsd_wrapper_t tsd_boot_wrapper = {
false,
TSD_INITIALIZER
};
bool tsd_booted = false;
#endif
JEMALLOC_DIAGNOSTIC_POP
/******************************************************************************/
/* A list of all the tsds in the nominal state. */
typedef ql_head(tsd_t) tsd_list_t;
static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds);
static malloc_mutex_t tsd_nominal_tsds_lock;
/* How many slow-path-enabling features are turned on. */
static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0);
static bool
tsd_in_nominal_list(tsd_t *tsd) {
tsd_t *tsd_list;
bool found = false;
/*
* We don't know that tsd is nominal; it might not be safe to get data
* out of it here.
*/
malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock);
ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
if (tsd == tsd_list) {
found = true;
break;
}
}
malloc_mutex_unlock(TSDN_NULL, &tsd_nominal_tsds_lock);
return found;
}
static void
tsd_add_nominal(tsd_t *tsd) {
assert(!tsd_in_nominal_list(tsd));
assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
ql_elm_new(tsd, TSD_MANGLE(tsd_link));
malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tsd_link));
malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}
static void
tsd_remove_nominal(tsd_t *tsd) {
assert(tsd_in_nominal_list(tsd));
assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tsd_link));
malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}
static void
tsd_force_recompute(tsdn_t *tsdn) {
/*
* The stores to tsd->state here need to synchronize with the exchange
* in tsd_slow_update.
*/
atomic_fence(ATOMIC_RELEASE);
malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock);
tsd_t *remote_tsd;
ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED)
<= tsd_state_nominal_max);
tsd_atomic_store(&remote_tsd->state,
tsd_state_nominal_recompute, ATOMIC_RELAXED);
/* See comments in te_recompute_fast_threshold(). */
atomic_fence(ATOMIC_SEQ_CST);
te_next_event_fast_set_non_nominal(remote_tsd);
}
malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock);
}
void
tsd_global_slow_inc(tsdn_t *tsdn) {
atomic_fetch_add_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
/*
* We unconditionally force a recompute, even if the global slow count
* was already positive. If we didn't, then it would be possible for us
* to return to the user, have the user synchronize externally with some
* other thread, and then have that other thread not have picked up the
* update yet (since the original incrementing thread might still be
* making its way through the tsd list).
*/
tsd_force_recompute(tsdn);
}
void tsd_global_slow_dec(tsdn_t *tsdn) {
atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
/* See the note in ..._inc(). */
tsd_force_recompute(tsdn);
}
static bool
tsd_local_slow(tsd_t *tsd) {
return !tsd_tcache_enabled_get(tsd)
|| tsd_reentrancy_level_get(tsd) > 0;
}
bool
tsd_global_slow() {
return atomic_load_u32(&tsd_global_slow_count, ATOMIC_RELAXED) > 0;
}
/******************************************************************************/
static uint8_t
tsd_state_compute(tsd_t *tsd) {
if (!tsd_nominal(tsd)) {
return tsd_state_get(tsd);
}
/* We're in *a* nominal state; but which one? */
if (malloc_slow || tsd_local_slow(tsd) || tsd_global_slow()) {
return tsd_state_nominal_slow;
} else {
return tsd_state_nominal;
}
}
void
tsd_slow_update(tsd_t *tsd) {
uint8_t old_state;
do {
uint8_t new_state = tsd_state_compute(tsd);
old_state = tsd_atomic_exchange(&tsd->state, new_state,
ATOMIC_ACQUIRE);
} while (old_state == tsd_state_nominal_recompute);
te_recompute_fast_threshold(tsd);
}
void
tsd_state_set(tsd_t *tsd, uint8_t new_state) {
/* Only the tsd module can change the state *to* recompute. */
assert(new_state != tsd_state_nominal_recompute);
uint8_t old_state = tsd_atomic_load(&tsd->state, ATOMIC_RELAXED);
if (old_state > tsd_state_nominal_max) {
/*
* Not currently in the nominal list, but it might need to be
* inserted there.
*/
assert(!tsd_in_nominal_list(tsd));
tsd_atomic_store(&tsd->state, new_state, ATOMIC_RELAXED);
if (new_state <= tsd_state_nominal_max) {
tsd_add_nominal(tsd);
}
} else {
/*
* We're currently nominal. If the new state is non-nominal,
* great; we take ourselves off the list and just enter the new
* state.
*/
assert(tsd_in_nominal_list(tsd));
if (new_state > tsd_state_nominal_max) {
tsd_remove_nominal(tsd);
tsd_atomic_store(&tsd->state, new_state,
ATOMIC_RELAXED);
} else {
/*
* This is the tricky case. We're transitioning from
* one nominal state to another. The caller can't know
* about any races that are occurring at the same time,
* so we always have to recompute no matter what.
*/
tsd_slow_update(tsd);
}
}
te_recompute_fast_threshold(tsd);
}
static void
tsd_prng_state_init(tsd_t *tsd) {
/*
* A nondeterministic seed based on the address of tsd reduces
* the likelihood of lockstep non-uniform cache index
* utilization among identical concurrent processes, but at the
* cost of test repeatability. For debug builds, instead use a
* deterministic seed.
*/
*tsd_prng_statep_get(tsd) = config_debug ? 0 :
(uint64_t)(uintptr_t)tsd;
}
static bool
tsd_data_init(tsd_t *tsd) {
/*
* We initialize the rtree context first (before the tcache), since the
* tcache initialization depends on it.
*/
rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
tsd_prng_state_init(tsd);
tsd_te_init(tsd); /* event_init may use the prng state above. */
tsd_san_init(tsd);
return tsd_tcache_enabled_data_init(tsd);
}
static void
assert_tsd_data_cleanup_done(tsd_t *tsd) {
assert(!tsd_nominal(tsd));
assert(!tsd_in_nominal_list(tsd));
assert(*tsd_arenap_get_unsafe(tsd) == NULL);
assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
}
static bool
tsd_data_init_nocleanup(tsd_t *tsd) {
assert(tsd_state_get(tsd) == tsd_state_reincarnated ||
tsd_state_get(tsd) == tsd_state_minimal_initialized);
/*
* During reincarnation, there is no guarantee that the cleanup function
* will be called (deallocation may happen after all tsd destructors).
* We set up tsd in a way that no cleanup is needed.
*/
rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
*tsd_tcache_enabledp_get_unsafe(tsd) = false;
*tsd_reentrancy_levelp_get(tsd) = 1;
tsd_prng_state_init(tsd);
tsd_te_init(tsd); /* event_init may use the prng state above. */
tsd_san_init(tsd);
assert_tsd_data_cleanup_done(tsd);
return false;
}
tsd_t *
tsd_fetch_slow(tsd_t *tsd, bool minimal) {
assert(!tsd_fast(tsd));
if (tsd_state_get(tsd) == tsd_state_nominal_slow) {
/*
* On slow path but no work needed. Note that we can't
* necessarily *assert* that we're slow, because we might be
* slow because of an asynchronous modification to global state,
* which might be asynchronously modified *back*.
*/
} else if (tsd_state_get(tsd) == tsd_state_nominal_recompute) {
tsd_slow_update(tsd);
} else if (tsd_state_get(tsd) == tsd_state_uninitialized) {
if (!minimal) {
if (tsd_booted) {
tsd_state_set(tsd, tsd_state_nominal);
tsd_slow_update(tsd);
/* Trigger cleanup handler registration. */
tsd_set(tsd);
tsd_data_init(tsd);
}
} else {
tsd_state_set(tsd, tsd_state_minimal_initialized);
tsd_set(tsd);
tsd_data_init_nocleanup(tsd);
}
} else if (tsd_state_get(tsd) == tsd_state_minimal_initialized) {
if (!minimal) {
/* Switch to fully initialized. */
tsd_state_set(tsd, tsd_state_nominal);
assert(*tsd_reentrancy_levelp_get(tsd) >= 1);
(*tsd_reentrancy_levelp_get(tsd))--;
tsd_slow_update(tsd);
tsd_data_init(tsd);
} else {
assert_tsd_data_cleanup_done(tsd);
}
} else if (tsd_state_get(tsd) == tsd_state_purgatory) {
tsd_state_set(tsd, tsd_state_reincarnated);
tsd_set(tsd);
tsd_data_init_nocleanup(tsd);
} else {
assert(tsd_state_get(tsd) == tsd_state_reincarnated);
}
return tsd;
}
void *
malloc_tsd_malloc(size_t size) {
return a0malloc(CACHELINE_CEILING(size));
}
void
malloc_tsd_dalloc(void *wrapper) {
a0dalloc(wrapper);
}
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
#ifndef _WIN32
JEMALLOC_EXPORT
#endif
void
_malloc_thread_cleanup(void) {
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
unsigned i;
for (i = 0; i < ncleanups; i++) {
pending[i] = true;
}
do {
again = false;
for (i = 0; i < ncleanups; i++) {
if (pending[i]) {
pending[i] = cleanups[i]();
if (pending[i]) {
again = true;
}
}
}
} while (again);
}
#ifndef _WIN32
JEMALLOC_EXPORT
#endif
void
_malloc_tsd_cleanup_register(bool (*f)(void)) {
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
cleanups[ncleanups] = f;
ncleanups++;
}
#endif
static void
tsd_do_data_cleanup(tsd_t *tsd) {
prof_tdata_cleanup(tsd);
iarena_cleanup(tsd);
arena_cleanup(tsd);
tcache_cleanup(tsd);
witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
*tsd_reentrancy_levelp_get(tsd) = 1;
}
void
tsd_cleanup(void *arg) {
tsd_t *tsd = (tsd_t *)arg;
switch (tsd_state_get(tsd)) {
case tsd_state_uninitialized:
/* Do nothing. */
break;
case tsd_state_minimal_initialized:
/* This implies the thread only did free() in its lifetime. */
/* Fall through. */
case tsd_state_reincarnated:
/*
* Reincarnated means another destructor deallocated memory
* after the destructor was called. Cleanup isn't required but
* is still called for testing and completeness.
*/
assert_tsd_data_cleanup_done(tsd);
JEMALLOC_FALLTHROUGH;
case tsd_state_nominal:
case tsd_state_nominal_slow:
tsd_do_data_cleanup(tsd);
tsd_state_set(tsd, tsd_state_purgatory);
tsd_set(tsd);
break;
case tsd_state_purgatory:
/*
* The previous time this destructor was called, we set the
* state to tsd_state_purgatory so that other destructors
* wouldn't cause re-creation of the tsd. This time, do
* nothing, and do not request another callback.
*/
break;
default:
not_reached();
}
#ifdef JEMALLOC_JET
test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd);
int *data = tsd_test_datap_get_unsafe(tsd);
if (test_callback != NULL) {
test_callback(data);
}
#endif
}
tsd_t *
malloc_tsd_boot0(void) {
tsd_t *tsd;
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
ncleanups = 0;
#endif
if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock",
WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) {
return NULL;
}
if (tsd_boot0()) {
return NULL;
}
tsd = tsd_fetch();
return tsd;
}
void
malloc_tsd_boot1(void) {
tsd_boot1();
tsd_t *tsd = tsd_fetch();
/* malloc_slow has been set properly. Update tsd_slow. */
tsd_slow_update(tsd);
}
#ifdef _WIN32
static BOOL WINAPI
_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
switch (fdwReason) {
#ifdef JEMALLOC_LAZY_LOCK
case DLL_THREAD_ATTACH:
isthreaded = true;
break;
#endif
case DLL_THREAD_DETACH:
_malloc_thread_cleanup();
break;
default:
break;
}
return true;
}
/*
* We need to be able to say "read" here (in the "pragma section"), but have
* hooked "read". We won't read for the rest of the file, so we can get away
* with unhooking.
*/
#ifdef read
# undef read
#endif
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
# pragma comment(linker, "/INCLUDE:_tls_callback")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
# pragma comment(linker, "/INCLUDE:" STRINGIFY(tls_callback) )
# endif
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
pthread_t self = pthread_self();
tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */
malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
malloc_mutex_unlock(TSDN_NULL, &head->lock);
return iter->data;
}
}
/* Insert block into list. */
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
return NULL;
}
void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) {
malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_remove(&head->blocks, block, link);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
}
#endif
void
tsd_prefork(tsd_t *tsd) {
malloc_mutex_prefork(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}
void
tsd_postfork_parent(tsd_t *tsd) {
malloc_mutex_postfork_parent(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}
void
tsd_postfork_child(tsd_t *tsd) {
malloc_mutex_postfork_child(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
ql_new(&tsd_nominal_tsds);
if (tsd_state_get(tsd) <= tsd_state_nominal_max) {
tsd_add_nominal(tsd);
}
}

View file

@ -0,0 +1,122 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
void
witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp, void *opaque) {
witness->name = name;
witness->rank = rank;
witness->comp = comp;
witness->opaque = opaque;
}
static void
witness_print_witness(witness_t *w, unsigned n) {
assert(n > 0);
if (n == 1) {
malloc_printf(" %s(%u)", w->name, w->rank);
} else {
malloc_printf(" %s(%u)X%u", w->name, w->rank, n);
}
}
static void
witness_print_witnesses(const witness_list_t *witnesses) {
witness_t *w, *last = NULL;
unsigned n = 0;
ql_foreach(w, witnesses, link) {
if (last != NULL && w->rank > last->rank) {
assert(w->name != last->name);
witness_print_witness(last, n);
n = 0;
} else if (last != NULL) {
assert(w->rank == last->rank);
assert(w->name == last->name);
}
last = w;
++n;
}
if (last != NULL) {
witness_print_witness(last, n);
}
}
static void
witness_lock_error_impl(const witness_list_t *witnesses,
const witness_t *witness) {
malloc_printf("<jemalloc>: Lock rank order reversal:");
witness_print_witnesses(witnesses);
malloc_printf(" %s(%u)\n", witness->name, witness->rank);
abort();
}
witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl;
static void
witness_owner_error_impl(const witness_t *witness) {
malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
witness->rank);
abort();
}
witness_owner_error_t *JET_MUTABLE witness_owner_error =
witness_owner_error_impl;
static void
witness_not_owner_error_impl(const witness_t *witness) {
malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
witness->rank);
abort();
}
witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error =
witness_not_owner_error_impl;
static void
witness_depth_error_impl(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth) {
malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
(depth != 1) ? "s" : "", rank_inclusive);
witness_print_witnesses(witnesses);
malloc_printf("\n");
abort();
}
witness_depth_error_t *JET_MUTABLE witness_depth_error =
witness_depth_error_impl;
void
witnesses_cleanup(witness_tsd_t *witness_tsd) {
witness_assert_lockless(witness_tsd_tsdn(witness_tsd));
/* Do nothing. */
}
void
witness_prefork(witness_tsd_t *witness_tsd) {
if (!config_debug) {
return;
}
witness_tsd->forking = true;
}
void
witness_postfork_parent(witness_tsd_t *witness_tsd) {
if (!config_debug) {
return;
}
witness_tsd->forking = false;
}
void
witness_postfork_child(witness_tsd_t *witness_tsd) {
if (!config_debug) {
return;
}
#ifndef JEMALLOC_MUTEX_INIT_CB
witness_list_t *witnesses;
witnesses = &witness_tsd->witnesses;
ql_new(witnesses);
#endif
witness_tsd->forking = false;
}

469
BeefRT/JEMalloc/src/zone.c Normal file
View file

@ -0,0 +1,469 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif
/* Definitions of the following structs in malloc/malloc.h might be too old
* for the built binary to run on newer versions of OSX. So use the newest
* possible version of those structs.
*/
typedef struct _malloc_zone_t {
void *reserved1;
void *reserved2;
size_t (*size)(struct _malloc_zone_t *, const void *);
void *(*malloc)(struct _malloc_zone_t *, size_t);
void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
void *(*valloc)(struct _malloc_zone_t *, size_t);
void (*free)(struct _malloc_zone_t *, void *);
void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
void (*destroy)(struct _malloc_zone_t *);
const char *zone_name;
unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
struct malloc_introspection_t *introspect;
unsigned version;
void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
} malloc_zone_t;
typedef struct {
vm_address_t address;
vm_size_t size;
} vm_range_t;
typedef struct malloc_statistics_t {
unsigned blocks_in_use;
size_t size_in_use;
size_t max_size_in_use;
size_t size_allocated;
} malloc_statistics_t;
typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
typedef struct malloc_introspection_t {
kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
size_t (*good_size)(malloc_zone_t *, size_t);
boolean_t (*check)(malloc_zone_t *);
void (*print)(malloc_zone_t *, boolean_t);
void (*log)(malloc_zone_t *, void *);
void (*force_lock)(malloc_zone_t *);
void (*force_unlock)(malloc_zone_t *);
void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
boolean_t (*zone_locked)(malloc_zone_t *);
boolean_t (*enable_discharge_checking)(malloc_zone_t *);
boolean_t (*disable_discharge_checking)(malloc_zone_t *);
void (*discharge)(malloc_zone_t *, void *);
#ifdef __BLOCKS__
void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
#else
void *enumerate_unavailable_without_blocks;
#endif
void (*reinit_lock)(malloc_zone_t *);
} malloc_introspection_t;
extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
extern malloc_zone_t *malloc_default_zone(void);
extern void malloc_zone_register(malloc_zone_t *zone);
extern void malloc_zone_unregister(malloc_zone_t *zone);
/*
* The malloc_default_purgeable_zone() function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;
static pid_t zone_force_lock_pid = -1;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t zone_size(malloc_zone_t *zone, const void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
static void zone_destroy(malloc_zone_t *zone);
static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
void **results, unsigned num_requested);
static void zone_batch_free(struct _malloc_zone_t *zone,
void **to_be_freed, unsigned num_to_be_freed);
static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask,
vm_address_t zone_address, memory_reader_t reader,
vm_range_recorder_t recorder);
static boolean_t zone_check(malloc_zone_t *zone);
static void zone_print(malloc_zone_t *zone, boolean_t verbose);
static void zone_log(malloc_zone_t *zone, void *address);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
static void zone_statistics(malloc_zone_t *zone,
malloc_statistics_t *stats);
static boolean_t zone_locked(malloc_zone_t *zone);
static void zone_reinit_lock(malloc_zone_t *zone);
/******************************************************************************/
/*
* Functions.
*/
static size_t
zone_size(malloc_zone_t *zone, const void *ptr) {
/*
* There appear to be places within Darwin (such as setenv(3)) that
* cause calls to this function with pointers that *no* zone owns. If
* we knew that all pointers were owned by *some* zone, we could split
* our zone into two parts, and use one as the default allocator and
* the other as the default deallocator/reallocator. Since that will
 * not work in practice, we must check all pointers to ensure that they
* reside within a mapped extent before determining size.
*/
return ivsalloc(tsdn_fetch(), ptr);
}
static void *
zone_malloc(malloc_zone_t *zone, size_t size) {
return je_malloc(size);
}
static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
return je_calloc(num, size);
}
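/* valloc(3) semantics: page-aligned allocation, delegated to posix_memalign. */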
static void *
zone_valloc(malloc_zone_t *zone, size_t size) {
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, PAGE, size);
return ret;
}
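/*
 * Pointers not owned by jemalloc (e.g. allocated before this zone became the
 * default) are handed back to the system allocator's free.
 */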
static void
zone_free(malloc_zone_t *zone, void *ptr) {
if (ivsalloc(tsdn_fetch(), ptr) != 0) {
je_free(ptr);
return;
}
free(ptr);
}
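/* Same ownership check as zone_free: unknown pointers go to the system realloc. */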
static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
if (ivsalloc(tsdn_fetch(), ptr) != 0) {
return je_realloc(ptr, size);
}
return realloc(ptr, size);
}
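/* If posix_memalign fails, ret remains NULL, which is what we return. */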
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, alignment, size);
return ret;
}
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
size_t alloc_size;
alloc_size = ivsalloc(tsdn_fetch(), ptr);
if (alloc_size != 0) {
assert(alloc_size == size);
je_free(ptr);
return;
}
free(ptr);
}
static void
zone_destroy(malloc_zone_t *zone) {
/* This function should never be called. */
not_reached();
}
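/*
 * Allocate up to num_requested blocks of the given size; stop at the first
 * failure and report how many allocations actually succeeded.
 */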
static unsigned
zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
unsigned num_requested) {
unsigned i;
for (i = 0; i < num_requested; i++) {
results[i] = je_malloc(size);
if (!results[i]) {
	break;
}
}
return i;
}
static void
zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
unsigned num_to_be_freed) {
unsigned i;
for (i = 0; i < num_to_be_freed; i++) {
zone_free(zone, to_be_freed[i]);
to_be_freed[i] = NULL;
}
}
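/* Report that no memory could be returned to the system under pressure. */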
static size_t
zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
return 0;
}
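/* Round the request up to the size class jemalloc would actually allocate. */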
static size_t
zone_good_size(malloc_zone_t *zone, size_t size) {
if (size == 0) {
size = 1;
}
return sz_s2u(size);
}
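/* Zone enumeration is not supported; report success without recording ranges. */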
static kern_return_t
zone_enumerator(task_t task, void *data, unsigned type_mask,
vm_address_t zone_address, memory_reader_t reader,
vm_range_recorder_t recorder) {
return KERN_SUCCESS;
}
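/* Nothing to verify; report the zone as consistent. */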
static boolean_t
zone_check(malloc_zone_t *zone) {
return true;
}
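/* zone_print and zone_log are no-op introspection hooks. */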
static void
zone_print(malloc_zone_t *zone, boolean_t verbose) {
}
static void
zone_log(malloc_zone_t *zone, void *address) {
}
static void
zone_force_lock(malloc_zone_t *zone) {
if (isthreaded) {
/*
* See the note in zone_force_unlock, below, to see why we need
* this.
*/
assert(zone_force_lock_pid == -1);
zone_force_lock_pid = getpid();
jemalloc_prefork();
}
}
static void
zone_force_unlock(malloc_zone_t *zone) {
/*
* zone_force_lock and zone_force_unlock are the entry points to the
* forking machinery on OS X. The tricky thing is, the child is not
* allowed to unlock mutexes locked in the parent, even if owned by the
* forking thread (and the mutex type we use in OS X will fail an assert
* if we try). In the child, we can get away with reinitializing all
* the mutexes, which has the effect of unlocking them. In the parent,
* doing this would mean we wouldn't wake any waiters blocked on the
* mutexes we unlock. So, we record the pid of the current thread in
* zone_force_lock, and use that to detect if we're in the parent or
* child here, to decide which unlock logic we need.
*/
if (isthreaded) {
assert(zone_force_lock_pid != -1);
if (getpid() == zone_force_lock_pid) {
jemalloc_postfork_parent();
} else {
jemalloc_postfork_child();
}
zone_force_lock_pid = -1;
}
}
static void
zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
/* We make no effort to actually fill the values. */
stats->blocks_in_use = 0;
stats->size_in_use = 0;
stats->max_size_in_use = 0;
stats->size_allocated = 0;
}
static boolean_t
zone_locked(malloc_zone_t *zone) {
/* Pretend no lock is being held. */
return false;
}
static void
zone_reinit_lock(malloc_zone_t *zone) {
/*
 * As of OSX 10.12, this function is only used when force_unlock would
 * be used if the zone version were < 9. So just use force_unlock.
 */
zone_force_unlock(zone);
}
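/*
 * Populate the malloc zone and its introspection vtable before registration;
 * discharge-checking hooks are not supported and are left NULL.
 */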
static void
zone_init(void) {
jemalloc_zone.size = zone_size;
jemalloc_zone.malloc = zone_malloc;
jemalloc_zone.calloc = zone_calloc;
jemalloc_zone.valloc = zone_valloc;
jemalloc_zone.free = zone_free;
jemalloc_zone.realloc = zone_realloc;
jemalloc_zone.destroy = zone_destroy;
jemalloc_zone.zone_name = "jemalloc_zone";
jemalloc_zone.batch_malloc = zone_batch_malloc;
jemalloc_zone.batch_free = zone_batch_free;
jemalloc_zone.introspect = &jemalloc_zone_introspect;
jemalloc_zone.version = 9;
jemalloc_zone.memalign = zone_memalign;
jemalloc_zone.free_definite_size = zone_free_definite_size;
jemalloc_zone.pressure_relief = zone_pressure_relief;
jemalloc_zone_introspect.enumerator = zone_enumerator;
jemalloc_zone_introspect.good_size = zone_good_size;
jemalloc_zone_introspect.check = zone_check;
jemalloc_zone_introspect.print = zone_print;
jemalloc_zone_introspect.log = zone_log;
jemalloc_zone_introspect.force_lock = zone_force_lock;
jemalloc_zone_introspect.force_unlock = zone_force_unlock;
jemalloc_zone_introspect.statistics = zone_statistics;
jemalloc_zone_introspect.zone_locked = zone_locked;
jemalloc_zone_introspect.enable_discharge_checking = NULL;
jemalloc_zone_introspect.disable_discharge_checking = NULL;
jemalloc_zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
#else
jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
}
static malloc_zone_t *
zone_default_get(void) {
malloc_zone_t **zones = NULL;
unsigned int num_zones = 0;
/*
* On OSX 10.12, malloc_default_zone returns a special zone that is not
* present in the list of registered zones. That zone uses a "lite zone"
* if one is present (apparently enabled when malloc stack logging is
* enabled), or the first registered zone otherwise. In practice this
* means unless malloc stack logging is enabled, the first registered
* zone is the default. So get the list of zones to get the first one,
* instead of relying on malloc_default_zone.
*/
if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
(vm_address_t**)&zones, &num_zones)) {
/*
* Reset the value in case the failure happened after it was
* set.
*/
num_zones = 0;
}
if (num_zones) {
return zones[0];
}
return malloc_default_zone();
}
/* As written, this function can only promote jemalloc_zone. */
static void
zone_promote(void) {
malloc_zone_t *zone;
do {
/*
* Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it
* at the location of the specified zone. Unregistering the
* default zone thus makes the last registered one the default.
* On OSX < 10.6, unregistering shifts all registered zones.
* The first registered zone then becomes the default.
*/
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
/*
* On OSX 10.6, having the default purgeable zone appear before
* the default zone makes some things crash because it thinks it
* owns the default zone allocated pointers. We thus
* unregister/re-register it in order to ensure it's always
* after the default zone. On OSX < 10.6, there is no purgeable
* zone, so this does nothing. On OSX >= 10.6, unregistering
* replaces the purgeable zone with the last registered zone
* above, i.e. the default zone. Registering it again then puts
* it at the end, obviously after the default zone.
*/
if (purgeable_zone != NULL) {
malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone);
}
zone = zone_default_get();
} while (zone != &jemalloc_zone);
}
JEMALLOC_ATTR(constructor)
void
zone_register(void) {
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
default_zone = zone_default_get();
if (!default_zone->zone_name || strcmp(default_zone->zone_name,
"DefaultMallocZone") != 0) {
return;
}
/*
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone() is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
zone_init();
malloc_zone_register(&jemalloc_zone);
/* Promote the custom zone to be default. */
zone_promote();
}
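/*
 * Illustrative sketch (not part of the upstream file): after this constructor
 * runs, the first registered zone should be jemalloc's. A hypothetical test
 * could confirm that via the same API zone_default_get() uses:
 *
 *     malloc_zone_t **zones;
 *     unsigned n;
 *     if (malloc_get_all_zones(0, NULL, (vm_address_t **)&zones, &n) ==
 *         KERN_SUCCESS && n > 0) {
 *         assert(strcmp(zones[0]->zone_name, "jemalloc_zone") == 0);
 *     }
 */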