Reformat the codebase with clang-format 18.

This commit is contained in:
guangli-dai 2025-06-13 12:31:12 -07:00 committed by Guangli Dai
parent 0a6215c171
commit f1bba4a87c
346 changed files with 18286 additions and 17770 deletions

File diff suppressed because it is too large. Load diff

View file

@ -11,15 +11,15 @@ JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1;
/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state. Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
size_t max_background_threads;
size_t n_background_threads;
size_t max_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;
@ -32,11 +32,11 @@ static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
static void
pthread_create_wrapper_init(void) {
#ifdef JEMALLOC_LAZY_LOCK
# ifdef JEMALLOC_LAZY_LOCK
if (!isthreaded) {
isthreaded = true;
}
#endif
# endif
}
int
@ -47,9 +47,9 @@ pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
return pthread_create_fptr(thread, attr, start_routine, arg);
}
#ifdef JEMALLOC_HAVE_DLSYM
#include <dlfcn.h>
#endif
# ifdef JEMALLOC_HAVE_DLSYM
# include <dlfcn.h>
# endif
static bool
pthread_create_fptr_init(void) {
@ -61,17 +61,18 @@ pthread_create_fptr_init(void) {
* wrapper for pthread_create; and 2) application may define its own
* wrapper as well (and can call malloc within the wrapper).
*/
#ifdef JEMALLOC_HAVE_DLSYM
# ifdef JEMALLOC_HAVE_DLSYM
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
if (pthread_create_fptr == NULL) {
pthread_create_fptr = dlsym(RTLD_DEFAULT, "pthread_create");
}
#else
# else
pthread_create_fptr = NULL;
#endif
# endif
if (pthread_create_fptr == NULL) {
if (config_lazy_lock) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
malloc_write(
"<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n");
abort();
} else {
@ -85,21 +86,24 @@ pthread_create_fptr_init(void) {
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED
void background_thread_wakeup_early(background_thread_info_t *info,
nstime_t *remaining_sleep) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
# define NOT_REACHED \
{ not_reached(); }
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
bool background_thread_is_started(
background_thread_info_t *info) NOT_REACHED
void background_thread_wakeup_early(
background_thread_info_t *info, nstime_t *remaining_sleep) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(
tsdn_t *tsdn, background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
# undef NOT_REACHED
#else
static bool background_thread_enabled_at_fork;
@ -116,49 +120,50 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
static inline bool
set_current_thread_affinity(int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) || defined(JEMALLOC_HAVE_PTHREAD_SETAFFINITY_NP)
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) \
|| defined(JEMALLOC_HAVE_PTHREAD_SETAFFINITY_NP)
# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
cpu_set_t cpuset;
#else
# ifndef __NetBSD__
# else
# ifndef __NetBSD__
cpuset_t cpuset;
# else
# else
cpuset_t *cpuset;
# endif
#endif
# endif
# endif
#ifndef __NetBSD__
# ifndef __NetBSD__
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
#else
# else
cpuset = cpuset_create();
#endif
# endif
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0);
#else
# ifndef __NetBSD__
int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t),
&cpuset);
# else
int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset),
cpuset);
# else
# ifndef __NetBSD__
int ret = pthread_setaffinity_np(
pthread_self(), sizeof(cpuset_t), &cpuset);
# else
int ret = pthread_setaffinity_np(
pthread_self(), cpuset_size(cpuset), cpuset);
cpuset_destroy(cpuset);
# endif
# endif
return ret != 0;
#endif
#else
return false;
#endif
# endif
# else
return false;
# endif
}
#define BILLION UINT64_C(1000000000)
# define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
# define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
static int
background_thread_cond_wait(background_thread_info_t *info,
struct timespec *ts) {
background_thread_cond_wait(
background_thread_info_t *info, struct timespec *ts) {
int ret;
/*
@ -177,8 +182,8 @@ background_thread_cond_wait(background_thread_info_t *info,
}
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
uint64_t interval) {
background_thread_sleep(
tsdn_t *tsdn, background_thread_info_t *info, uint64_t interval) {
if (config_stats) {
info->tot_n_runs++;
}
@ -192,21 +197,21 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
int ret;
if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
background_thread_wakeup_time_set(tsdn, info,
BACKGROUND_THREAD_INDEFINITE_SLEEP);
background_thread_wakeup_time_set(
tsdn, info, BACKGROUND_THREAD_INDEFINITE_SLEEP);
ret = background_thread_cond_wait(info, NULL);
assert(ret == 0);
} else {
assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS
&& interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
/* We need malloc clock (can be different from tv). */
nstime_t next_wakeup;
nstime_init_update(&next_wakeup);
nstime_iadd(&next_wakeup, interval);
assert(nstime_ns(&next_wakeup) <
BACKGROUND_THREAD_INDEFINITE_SLEEP);
background_thread_wakeup_time_set(tsdn, info,
nstime_ns(&next_wakeup));
assert(nstime_ns(&next_wakeup)
< BACKGROUND_THREAD_INDEFINITE_SLEEP);
background_thread_wakeup_time_set(
tsdn, info, nstime_ns(&next_wakeup));
nstime_t ts_wakeup;
nstime_copy(&ts_wakeup, &before_sleep);
@ -245,11 +250,11 @@ background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
}
static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
unsigned ind) {
background_work_sleep_once(
tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) {
uint64_t ns_until_deferred = BACKGROUND_THREAD_DEFERRED_MAX;
unsigned narenas = narenas_total_get();
bool slept_indefinitely = background_thread_indefinite_sleep(info);
bool slept_indefinitely = background_thread_indefinite_sleep(info);
for (unsigned i = ind; i < narenas; i += max_background_threads) {
arena_t *arena = arena_get(tsdn, i, false);
@ -279,11 +284,10 @@ background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) {
sleep_ns = BACKGROUND_THREAD_INDEFINITE_SLEEP;
} else {
sleep_ns =
(ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS)
sleep_ns = (ns_until_deferred
< BACKGROUND_THREAD_MIN_INTERVAL_NS)
? BACKGROUND_THREAD_MIN_INTERVAL_NS
: ns_until_deferred;
}
background_thread_sleep(tsdn, info, sleep_ns);
@ -292,11 +296,11 @@ background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
if (info == &background_thread_info[0]) {
malloc_mutex_assert_owner(tsd_tsdn(tsd),
&background_thread_lock);
malloc_mutex_assert_owner(
tsd_tsdn(tsd), &background_thread_lock);
} else {
malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
&background_thread_lock);
malloc_mutex_assert_not_owner(
tsd_tsdn(tsd), &background_thread_lock);
}
pre_reentrancy(tsd, NULL);
@ -340,21 +344,23 @@ background_thread_create_signals_masked(pthread_t *thread,
sigset_t set;
sigfillset(&set);
sigset_t oldset;
int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
if (mask_err != 0) {
return mask_err;
}
int create_err = pthread_create_wrapper(thread, attr, start_routine,
arg);
int create_err = pthread_create_wrapper(
thread, attr, start_routine, arg);
/*
* Restore the signal mask. Failure to restore the signal mask here
* changes program behavior.
*/
int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
if (restore_err != 0) {
malloc_printf("<jemalloc>: background thread creation "
malloc_printf(
"<jemalloc>: background thread creation "
"failed (%d), and signal mask restoration failed "
"(%d)\n", create_err, restore_err);
"(%d)\n",
create_err, restore_err);
if (opt_abort) {
abort();
}
@ -364,8 +370,8 @@ background_thread_create_signals_masked(pthread_t *thread,
static bool
check_background_thread_creation(tsd_t *tsd,
const size_t const_max_background_threads,
unsigned *n_created, bool *created_threads) {
const size_t const_max_background_threads, unsigned *n_created,
bool *created_threads) {
bool ret = false;
if (likely(*n_created == n_background_threads)) {
return ret;
@ -391,7 +397,7 @@ check_background_thread_creation(tsd_t *tsd,
pre_reentrancy(tsd, NULL);
int err = background_thread_create_signals_masked(&info->thread,
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
NULL, background_thread_entry, (void *)(uintptr_t)i);
post_reentrancy(tsd);
@ -399,8 +405,10 @@ check_background_thread_creation(tsd_t *tsd,
(*n_created)++;
created_threads[i] = true;
} else {
malloc_printf("<jemalloc>: background thread "
"creation failed (%d)\n", err);
malloc_printf(
"<jemalloc>: background thread "
"creation failed (%d)\n",
err);
if (opt_abort) {
abort();
}
@ -434,16 +442,17 @@ background_thread0_work(tsd_t *tsd) {
/* Start working, and create more threads when asked. */
unsigned n_created = 1;
while (background_thread_info[0].state != background_thread_stopped) {
if (background_thread_pause_check(tsd_tsdn(tsd),
&background_thread_info[0])) {
if (background_thread_pause_check(
tsd_tsdn(tsd), &background_thread_info[0])) {
continue;
}
if (check_background_thread_creation(tsd, const_max_background_threads,
&n_created, (bool *)&created_threads)) {
if (check_background_thread_creation(tsd,
const_max_background_threads, &n_created,
(bool *)&created_threads)) {
continue;
}
background_work_sleep_once(tsd_tsdn(tsd),
&background_thread_info[0], 0);
background_work_sleep_once(
tsd_tsdn(tsd), &background_thread_info[0], 0);
}
/*
@ -460,8 +469,8 @@ background_thread0_work(tsd_t *tsd) {
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
if (info->state != background_thread_stopped) {
/* The thread was not created. */
assert(info->state ==
background_thread_started);
assert(
info->state == background_thread_started);
n_background_threads--;
info->state = background_thread_stopped;
}
@ -477,14 +486,14 @@ background_work(tsd_t *tsd, unsigned ind) {
background_thread_info_t *info = &background_thread_info[ind];
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
BACKGROUND_THREAD_INDEFINITE_SLEEP);
background_thread_wakeup_time_set(
tsd_tsdn(tsd), info, BACKGROUND_THREAD_INDEFINITE_SLEEP);
if (ind == 0) {
background_thread0_work(tsd);
} else {
while (info->state != background_thread_stopped) {
if (background_thread_pause_check(tsd_tsdn(tsd),
info)) {
if (background_thread_pause_check(
tsd_tsdn(tsd), info)) {
continue;
}
background_work_sleep_once(tsd_tsdn(tsd), info, ind);
@ -499,11 +508,11 @@ static void *
background_thread_entry(void *ind_arg) {
unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
# ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#elif defined(JEMALLOC_HAVE_PTHREAD_SET_NAME_NP)
# elif defined(JEMALLOC_HAVE_PTHREAD_SET_NAME_NP)
pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
#endif
# endif
if (opt_percpu_arena != percpu_arena_disabled) {
set_current_thread_affinity((int)thread_ind);
}
@ -513,8 +522,8 @@ background_thread_entry(void *ind_arg) {
* turn triggers another background thread creation).
*/
background_work(tsd_internal_fetch(), thread_ind);
assert(pthread_equal(pthread_self(),
background_thread_info[thread_ind].thread));
assert(pthread_equal(
pthread_self(), background_thread_info[thread_ind].thread));
return NULL;
}
@ -538,8 +547,8 @@ background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
bool need_new_thread;
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
need_new_thread = background_thread_enabled() &&
(info->state == background_thread_stopped);
need_new_thread = background_thread_enabled()
&& (info->state == background_thread_stopped);
if (need_new_thread) {
background_thread_init(tsd, info);
}
@ -564,13 +573,15 @@ background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
* background threads with the underlying pthread_create.
*/
int err = background_thread_create_signals_masked(&info->thread, NULL,
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
background_thread_entry, (void *)thread_ind);
post_reentrancy(tsd);
if (err != 0) {
malloc_printf("<jemalloc>: arena 0 background thread creation "
"failed (%d)\n", err);
malloc_printf(
"<jemalloc>: arena 0 background thread creation "
"failed (%d)\n",
err);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
info->state = background_thread_stopped;
n_background_threads--;
@ -612,12 +623,12 @@ background_threads_enable(tsd_t *tsd) {
/* Mark the threads we need to create for thread 0. */
unsigned narenas = narenas_total_get();
for (unsigned i = 1; i < narenas; i++) {
if (marked[i % max_background_threads] ||
arena_get(tsd_tsdn(tsd), i, false) == NULL) {
if (marked[i % max_background_threads]
|| arena_get(tsd_tsdn(tsd), i, false) == NULL) {
continue;
}
background_thread_info_t *info = &background_thread_info[
i % max_background_threads];
background_thread_info_t *info =
&background_thread_info[i % max_background_threads];
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
assert(info->state == background_thread_stopped);
background_thread_init(tsd, info);
@ -635,8 +646,8 @@ background_threads_enable(tsd_t *tsd) {
for (unsigned i = 0; i < narenas; i++) {
arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
if (arena != NULL) {
pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
&arena->pa_shard, true);
pa_shard_set_deferral_allowed(
tsd_tsdn(tsd), &arena->pa_shard, true);
}
}
return false;
@ -648,8 +659,8 @@ background_threads_disable(tsd_t *tsd) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
/* Thread 0 will be responsible for terminating other threads. */
if (background_threads_disable_single(tsd,
&background_thread_info[0])) {
if (background_threads_disable_single(
tsd, &background_thread_info[0])) {
return true;
}
assert(n_background_threads == 0);
@ -657,8 +668,8 @@ background_threads_disable(tsd_t *tsd) {
for (unsigned i = 0; i < narenas; i++) {
arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
if (arena != NULL) {
pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
&arena->pa_shard, false);
pa_shard_set_deferral_allowed(
tsd_tsdn(tsd), &arena->pa_shard, false);
}
}
@ -671,15 +682,15 @@ background_thread_is_started(background_thread_info_t *info) {
}
void
background_thread_wakeup_early(background_thread_info_t *info,
nstime_t *remaining_sleep) {
background_thread_wakeup_early(
background_thread_info_t *info, nstime_t *remaining_sleep) {
/*
* This is an optimization to increase batching. At this point
* we know that background thread wakes up soon, so the time to cache
* the just freed memory is bounded and low.
*/
if (remaining_sleep != NULL && nstime_ns(remaining_sleep) <
BACKGROUND_THREAD_MIN_INTERVAL_NS) {
if (remaining_sleep != NULL
&& nstime_ns(remaining_sleep) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return;
}
pthread_cond_signal(&info->cond);
@ -701,8 +712,8 @@ background_thread_prefork1(tsdn_t *tsdn) {
void
background_thread_postfork_parent(tsdn_t *tsdn) {
for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_postfork_parent(tsdn,
&background_thread_info[i].mtx);
malloc_mutex_postfork_parent(
tsdn, &background_thread_info[i].mtx);
}
malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}
@ -710,8 +721,8 @@ background_thread_postfork_parent(tsdn_t *tsdn) {
void
background_thread_postfork_child(tsdn_t *tsdn) {
for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_postfork_child(tsdn,
&background_thread_info[i].mtx);
malloc_mutex_postfork_child(
tsdn, &background_thread_info[i].mtx);
}
malloc_mutex_postfork_child(tsdn, &background_thread_lock);
if (!background_thread_enabled_at_fork) {
@ -760,8 +771,8 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
if (info->state != background_thread_stopped) {
num_runs += info->tot_n_runs;
nstime_add(&stats->run_interval, &info->tot_sleep_time);
malloc_mutex_prof_max_update(tsdn,
&stats->max_counter_per_bg_thd, &info->mtx);
malloc_mutex_prof_max_update(
tsdn, &stats->max_counter_per_bg_thd, &info->mtx);
}
malloc_mutex_unlock(tsdn, &info->mtx);
}
@ -774,9 +785,9 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
return false;
}
#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
# undef BACKGROUND_THREAD_NPAGES_THRESHOLD
# undef BILLION
# undef BACKGROUND_THREAD_MIN_INTERVAL_NS
/*
* When lazy lock is enabled, we need to make sure setting isthreaded before
@ -787,24 +798,24 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
void
background_thread_ctl_init(tsdn_t *tsdn) {
malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
# ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
pthread_create_fptr_init();
pthread_create_wrapper_init();
#endif
# endif
}
#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
bool
background_thread_boot0(void) {
bool background_thread_boot0(void) {
if (!have_background_thread && opt_background_thread) {
malloc_printf("<jemalloc>: option background_thread currently "
malloc_printf(
"<jemalloc>: option background_thread currently "
"supports pthread only\n");
return true;
}
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
if ((config_lazy_lock || opt_background_thread) &&
pthread_create_fptr_init()) {
if ((config_lazy_lock || opt_background_thread)
&& pthread_create_fptr_init()) {
return true;
}
#endif
@ -823,15 +834,15 @@ background_thread_boot1(tsdn_t *tsdn, base_t *base) {
max_background_threads = opt_max_background_threads;
if (malloc_mutex_init(&background_thread_lock,
"background_thread_global",
WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
malloc_mutex_rank_exclusive)) {
"background_thread_global",
WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
malloc_mutex_rank_exclusive)) {
return true;
}
background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
base, opt_max_background_threads *
sizeof(background_thread_info_t), CACHELINE);
base, opt_max_background_threads * sizeof(background_thread_info_t),
CACHELINE);
if (background_thread_info == NULL) {
return true;
}
@ -840,8 +851,8 @@ background_thread_boot1(tsdn_t *tsdn, base_t *base) {
background_thread_info_t *info = &background_thread_info[i];
/* Thread mutex is rank_inclusive because of thread0. */
if (malloc_mutex_init(&info->mtx, "background_thread",
WITNESS_RANK_BACKGROUND_THREAD,
malloc_mutex_address_ordered)) {
WITNESS_RANK_BACKGROUND_THREAD,
malloc_mutex_address_ordered)) {
return true;
}
if (pthread_cond_init(&info->cond, NULL)) {

View file

@ -12,7 +12,7 @@
* of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
*/
#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD_A0 5
/******************************************************************************/
@ -22,25 +22,21 @@ static base_t *b0;
metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
const char *const metadata_thp_mode_names[] = {
"disabled",
"auto",
"always"
};
const char *const metadata_thp_mode_names[] = {"disabled", "auto", "always"};
/******************************************************************************/
static inline bool
metadata_thp_madvise(void) {
return (metadata_thp_enabled() &&
(init_system_thp_mode == thp_mode_default));
return (metadata_thp_enabled()
&& (init_system_thp_mode == thp_mode_default));
}
static void *
base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
void *addr;
bool zero = true;
bool commit = true;
bool zero = true;
bool commit = true;
/*
* Use huge page sizes and alignment when opt_metadata_thp is enabled
@ -56,16 +52,16 @@ base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
if (ehooks_are_default(ehooks)) {
addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
} else {
addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero,
&commit);
addr = ehooks_alloc(
tsdn, ehooks, NULL, size, alignment, &zero, &commit);
}
return addr;
}
static void
base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
size_t size) {
base_unmap(
tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr, size_t size) {
/*
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
* stopping at first success. This cascade is performed for consistency
@ -109,8 +105,8 @@ base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
label_done:
if (metadata_thp_madvise()) {
/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
(size & HUGEPAGE_MASK) == 0);
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0
&& (size & HUGEPAGE_MASK) == 0);
pages_nohuge(addr, size);
}
}
@ -126,8 +122,8 @@ base_edata_is_reused(edata_t *edata) {
}
static void
base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
size_t size) {
base_edata_init(
size_t *extent_sn_next, edata_t *edata, void *addr, size_t size) {
size_t sn;
sn = *extent_sn_next;
@ -174,9 +170,9 @@ huge_arena_auto_thp_switch(tsdn_t *tsdn, pac_thp_t *pac_thp) {
unsigned cnt = 0;
edata_t *edata;
ql_foreach(edata, &pending_list->head, ql_link_active) {
ql_foreach (edata, &pending_list->head, ql_link_active) {
assert(edata != NULL);
void *addr = edata_addr_get(edata);
void *addr = edata_addr_get(edata);
size_t size = edata_size_get(edata);
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
assert(HUGEPAGE_CEILING(size) == size && size != 0);
@ -196,11 +192,11 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
/* Called when adding a new block. */
bool should_switch;
if (base_ind_get(base) != 0) {
should_switch = (base_get_num_blocks(base, true) ==
BASE_AUTO_THP_THRESHOLD);
should_switch = (base_get_num_blocks(base, true)
== BASE_AUTO_THP_THRESHOLD);
} else {
should_switch = (base_get_num_blocks(base, true) ==
BASE_AUTO_THP_THRESHOLD_A0);
should_switch = (base_get_num_blocks(base, true)
== BASE_AUTO_THP_THRESHOLD_A0);
}
if (!should_switch) {
return;
@ -214,8 +210,9 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
assert((block->size & HUGEPAGE_MASK) == 0);
pages_huge(block, block->size);
if (config_stats) {
base->n_thp += HUGEPAGE_CEILING(block->size -
edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
base->n_thp += HUGEPAGE_CEILING(block->size
- edata_bsize_get(&block->edata))
>> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
@ -242,20 +239,22 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
}
static void *
base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
size_t alignment) {
base_extent_bump_alloc_helper(
edata_t *edata, size_t *gap_size, size_t size, size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
*gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
alignment) - (uintptr_t)edata_addr_get(edata);
*gap_size = ALIGNMENT_CEILING(
(uintptr_t)edata_addr_get(edata), alignment)
- (uintptr_t)edata_addr_get(edata);
ret = (void *)((byte_t *)edata_addr_get(edata) + *gap_size);
assert(edata_bsize_get(edata) >= *gap_size + size);
edata_binit(edata, (void *)((byte_t *)edata_addr_get(edata) +
*gap_size + size), edata_bsize_get(edata) - *gap_size - size,
edata_sn_get(edata), base_edata_is_reused(edata));
edata_binit(edata,
(void *)((byte_t *)edata_addr_get(edata) + *gap_size + size),
edata_bsize_get(edata) - *gap_size - size, edata_sn_get(edata),
base_edata_is_reused(edata));
return ret;
}
@ -312,24 +311,26 @@ base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, edata_t *edata,
* crossed by the new allocation. Adjust n_thp similarly when
* metadata_thp is enabled.
*/
base->resident += PAGE_CEILING((uintptr_t)addr + size) -
PAGE_CEILING((uintptr_t)addr - gap_size);
base->resident += PAGE_CEILING((uintptr_t)addr + size)
- PAGE_CEILING((uintptr_t)addr - gap_size);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
if (metadata_thp_madvise() && (opt_metadata_thp ==
metadata_thp_always || base->auto_thp_switched)) {
if (metadata_thp_madvise()
&& (opt_metadata_thp == metadata_thp_always
|| base->auto_thp_switched)) {
base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
- HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
LG_HUGEPAGE;
- HUGEPAGE_CEILING(
(uintptr_t)addr - gap_size))
>> LG_HUGEPAGE;
assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
}
}
}
static void *
base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, edata_t *edata, size_t size,
size_t alignment) {
void *ret;
base_extent_bump_alloc(
tsdn_t *tsdn, base_t *base, edata_t *edata, size_t size, size_t alignment) {
void *ret;
size_t gap_size;
ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
@ -339,9 +340,9 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, edata_t *edata, size_t size,
static size_t
base_block_size_ceil(size_t block_size) {
return opt_metadata_thp == metadata_thp_disabled ?
ALIGNMENT_CEILING(block_size, BASE_BLOCK_MIN_ALIGN) :
HUGEPAGE_CEILING(block_size);
return opt_metadata_thp == metadata_thp_disabled
? ALIGNMENT_CEILING(block_size, BASE_BLOCK_MIN_ALIGN)
: HUGEPAGE_CEILING(block_size);
}
/*
@ -356,8 +357,8 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t header_size = sizeof(base_block_t);
size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
header_size;
size_t gap_size = ALIGNMENT_CEILING(header_size, alignment)
- header_size;
/*
* Create increasingly larger blocks in order to limit the total number
* of disjoint virtual memory ranges. Choose the next size in the page
@ -365,27 +366,29 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
* HUGEPAGE when using metadata_thp), or a size large enough to satisfy
* the requested size and alignment, whichever is larger.
*/
size_t min_block_size = base_block_size_ceil(sz_psz2u(header_size +
gap_size + usize));
pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
*pind_last + 1 : *pind_last;
size_t next_block_size = base_block_size_ceil(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
block_size);
size_t min_block_size = base_block_size_ceil(
sz_psz2u(header_size + gap_size + usize));
pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS))
? *pind_last + 1
: *pind_last;
size_t next_block_size = base_block_size_ceil(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size)
? min_block_size
: next_block_size;
base_block_t *block = (base_block_t *)base_map(
tsdn, ehooks, ind, block_size);
if (block == NULL) {
return NULL;
}
if (metadata_thp_madvise()) {
void *addr = (void *)block;
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
(block_size & HUGEPAGE_MASK) == 0);
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0
&& (block_size & HUGEPAGE_MASK) == 0);
if (opt_metadata_thp == metadata_thp_always) {
pages_huge(addr, block_size);
} else if (opt_metadata_thp == metadata_thp_auto &&
base != NULL) {
} else if (opt_metadata_thp == metadata_thp_auto
&& base != NULL) {
/* base != NULL indicates this is not a new base. */
malloc_mutex_lock(tsdn, &base->mtx);
base_auto_thp_switch(tsdn, base);
@ -432,12 +435,12 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
base->allocated += sizeof(base_block_t);
base->resident += PAGE_CEILING(sizeof(base_block_t));
base->mapped += block->size;
if (metadata_thp_madvise() &&
!(opt_metadata_thp == metadata_thp_auto
&& !base->auto_thp_switched)) {
if (metadata_thp_madvise()
&& !(opt_metadata_thp == metadata_thp_auto
&& !base->auto_thp_switched)) {
assert(base->n_thp > 0);
base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
LG_HUGEPAGE;
base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t))
>> LG_HUGEPAGE;
}
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
@ -455,7 +458,7 @@ base_t *
base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
bool metadata_use_hooks) {
pszind_t pind_last = 0;
size_t extent_sn_next = 0;
size_t extent_sn_next = 0;
/*
* The base will contain the ehooks eventually, but it itself is
@ -463,9 +466,10 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
* memory, and then initialize the ehooks within the base_t.
*/
ehooks_t fake_ehooks;
ehooks_init(&fake_ehooks, metadata_use_hooks ?
(extent_hooks_t *)extent_hooks :
(extent_hooks_t *)&ehooks_default_extent_hooks, ind);
ehooks_init(&fake_ehooks,
metadata_use_hooks ? (extent_hooks_t *)extent_hooks
: (extent_hooks_t *)&ehooks_default_extent_hooks,
ind);
base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
@ -473,17 +477,18 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
return NULL;
}
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
&gap_size, base_size, base_alignment);
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
base_t *base = (base_t *)base_extent_bump_alloc_helper(
&block->edata, &gap_size, base_size, base_alignment);
ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind);
ehooks_init(&base->ehooks_base, metadata_use_hooks ?
(extent_hooks_t *)extent_hooks :
(extent_hooks_t *)&ehooks_default_extent_hooks, ind);
ehooks_init(&base->ehooks_base,
metadata_use_hooks ? (extent_hooks_t *)extent_hooks
: (extent_hooks_t *)&ehooks_default_extent_hooks,
ind);
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
malloc_mutex_rank_exclusive)) {
malloc_mutex_rank_exclusive)) {
base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
return NULL;
}
@ -502,9 +507,10 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
base->allocated = sizeof(base_block_t);
base->resident = PAGE_CEILING(sizeof(base_block_t));
base->mapped = block->size;
base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
>> LG_HUGEPAGE : 0;
base->n_thp = (opt_metadata_thp == metadata_thp_always)
&& metadata_thp_madvise()
? HUGEPAGE_CEILING(sizeof(base_block_t)) >> LG_HUGEPAGE
: 0;
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
@ -512,8 +518,8 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
/* Locking here is only necessary because of assertions. */
malloc_mutex_lock(tsdn, &base->mtx);
base_extent_bump_alloc_post(tsdn, base, &block->edata, gap_size, base,
base_size);
base_extent_bump_alloc_post(
tsdn, base, &block->edata, gap_size, base, base_size);
malloc_mutex_unlock(tsdn, &base->mtx);
return base;
@ -521,13 +527,13 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
void
base_delete(tsdn_t *tsdn, base_t *base) {
ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
base_block_t *next = base->blocks;
do {
base_block_t *block = next;
next = block->next;
base_unmap(tsdn, ehooks, base_ind_get(base), block,
block->size);
base_unmap(
tsdn, ehooks, base_ind_get(base), block, block->size);
} while (next != NULL);
}
@ -543,8 +549,8 @@ base_ehooks_get_for_metadata(base_t *base) {
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
extent_hooks_t *old_extent_hooks =
ehooks_get_extent_hooks_ptr(&base->ehooks);
extent_hooks_t *old_extent_hooks = ehooks_get_extent_hooks_ptr(
&base->ehooks);
ehooks_init(&base->ehooks, extent_hooks, ehooks_ind_get(&base->ehooks));
return old_extent_hooks;
}
@ -602,9 +608,9 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
edata_t *
base_alloc_edata(tsdn_t *tsdn, base_t *base) {
size_t esn, usize;
edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
EDATA_ALIGNMENT, &esn, &usize);
size_t esn, usize;
edata_t *edata = base_alloc_impl(
tsdn, base, sizeof(edata_t), EDATA_ALIGNMENT, &esn, &usize);
if (edata == NULL) {
return NULL;
}
@ -618,8 +624,8 @@ base_alloc_edata(tsdn_t *tsdn, base_t *base) {
void *
base_alloc_rtree(tsdn_t *tsdn, base_t *base, size_t size) {
size_t usize;
void *rtree = base_alloc_impl(tsdn, base, size, CACHELINE, NULL,
&usize);
void *rtree = base_alloc_impl(
tsdn, base, size, CACHELINE, NULL, &usize);
if (rtree == NULL) {
return NULL;
}
@ -632,8 +638,8 @@ base_alloc_rtree(tsdn_t *tsdn, base_t *base, size_t size) {
static inline void
b0_alloc_header_size(size_t *header_size, size_t *alignment) {
*alignment = QUANTUM;
*header_size = QUANTUM > sizeof(edata_t *) ? QUANTUM :
sizeof(edata_t *);
*header_size = QUANTUM > sizeof(edata_t *) ? QUANTUM
: sizeof(edata_t *);
}
/*
@ -645,7 +651,7 @@ b0_alloc_header_size(size_t *header_size, size_t *alignment) {
*/
void *
b0_alloc_tcache_stack(tsdn_t *tsdn, size_t stack_size) {
base_t *base = b0get();
base_t *base = b0get();
edata_t *edata = base_alloc_base_edata(tsdn, base);
if (edata == NULL) {
return NULL;
@ -662,8 +668,8 @@ b0_alloc_tcache_stack(tsdn_t *tsdn, size_t stack_size) {
b0_alloc_header_size(&header_size, &alignment);
size_t alloc_size = sz_s2u(stack_size + header_size);
void *addr = base_alloc_impl(tsdn, base, alloc_size, alignment, &esn,
NULL);
void *addr = base_alloc_impl(
tsdn, base, alloc_size, alignment, &esn, NULL);
if (addr == NULL) {
edata_avail_insert(&base->edata_avail, edata);
return NULL;
@ -683,8 +689,8 @@ b0_dalloc_tcache_stack(tsdn_t *tsdn, void *tcache_stack) {
b0_alloc_header_size(&header_size, &alignment);
edata_t *edata = *(edata_t **)((byte_t *)tcache_stack - header_size);
void *addr = edata_addr_get(edata);
size_t bsize = edata_bsize_get(edata);
void *addr = edata_addr_get(edata);
size_t bsize = edata_bsize_get(edata);
/* Marked as "reused" to avoid double counting stats. */
assert(base_edata_is_reused(edata));
assert(addr != NULL && bsize > 0);
@ -707,7 +713,8 @@ base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
malloc_mutex_lock(tsdn, &base->mtx);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
assert(base->edata_allocated + base->rtree_allocated <= base->allocated);
assert(
base->edata_allocated + base->rtree_allocated <= base->allocated);
*allocated = base->allocated;
*edata_allocated = base->edata_allocated;
*rtree_allocated = base->rtree_allocated;

View file

@ -18,8 +18,8 @@ batcher_init(batcher_t *batcher, size_t nelems_max) {
* Returns an index (into some user-owned array) to use for pushing, or
* BATCHER_NO_IDX if no index is free.
*/
size_t batcher_push_begin(tsdn_t *tsdn, batcher_t *batcher,
size_t elems_to_push) {
size_t
batcher_push_begin(tsdn_t *tsdn, batcher_t *batcher, size_t elems_to_push) {
assert(elems_to_push > 0);
size_t nelems_guess = atomic_load_zu(&batcher->nelems, ATOMIC_RELAXED);
if (nelems_guess + elems_to_push > batcher->nelems_max) {
@ -37,7 +37,8 @@ size_t batcher_push_begin(tsdn_t *tsdn, batcher_t *batcher,
* racing accesses of the batcher can fail fast instead of trying to
* acquire a mutex only to discover that there's no space for them.
*/
atomic_store_zu(&batcher->nelems, nelems + elems_to_push, ATOMIC_RELAXED);
atomic_store_zu(
&batcher->nelems, nelems + elems_to_push, ATOMIC_RELAXED);
batcher->npushes++;
return nelems;
}
@ -75,7 +76,8 @@ batcher_pop_begin(tsdn_t *tsdn, batcher_t *batcher) {
return nelems;
}
void batcher_pop_end(tsdn_t *tsdn, batcher_t *batcher) {
void
batcher_pop_end(tsdn_t *tsdn, batcher_t *batcher) {
assert(atomic_load_zu(&batcher->nelems, ATOMIC_RELAXED) == 0);
malloc_mutex_unlock(tsdn, &batcher->mtx);
}

View file

@ -10,8 +10,8 @@
unsigned bin_batching_test_ndalloc_slabs_max = (unsigned)-1;
void (*bin_batching_test_after_push_hook)(size_t push_idx);
void (*bin_batching_test_mid_pop_hook)(size_t nelems_to_pop);
void (*bin_batching_test_after_unlock_hook)(unsigned slab_dalloc_count,
bool list_empty);
void (*bin_batching_test_after_unlock_hook)(
unsigned slab_dalloc_count, bool list_empty);
#endif
bool
@ -49,7 +49,7 @@ bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
bool
bin_init(bin_t *bin, unsigned binind) {
if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
malloc_mutex_rank_exclusive)) {
malloc_mutex_rank_exclusive)) {
return true;
}
bin->slabcur = NULL;
@ -60,8 +60,8 @@ bin_init(bin_t *bin, unsigned binind) {
}
if (arena_bin_has_batch(binind)) {
bin_with_batch_t *batched_bin = (bin_with_batch_t *)bin;
batcher_init(&batched_bin->remote_frees,
opt_bin_info_remote_free_max);
batcher_init(
&batched_bin->remote_frees, opt_bin_info_remote_free_max);
}
return false;
}

View file

@ -19,7 +19,7 @@ size_t opt_bin_info_remote_free_max = BIN_REMOTE_FREE_ELEMS_MAX;
bin_info_t bin_infos[SC_NBINS];
szind_t bin_info_nbatched_sizes;
szind_t bin_info_nbatched_sizes;
unsigned bin_info_nbatched_bins;
unsigned bin_info_nunbatched_bins;
@ -28,12 +28,12 @@ bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
bin_info_t infos[SC_NBINS]) {
for (unsigned i = 0; i < SC_NBINS; i++) {
bin_info_t *bin_info = &infos[i];
sc_t *sc = &sc_data->sc[i];
sc_t *sc = &sc_data->sc[i];
bin_info->reg_size = ((size_t)1U << sc->lg_base)
+ ((size_t)sc->ndelta << sc->lg_delta);
bin_info->slab_size = (sc->pgs << LG_PAGE);
bin_info->nregs =
(uint32_t)(bin_info->slab_size / bin_info->reg_size);
bin_info->nregs = (uint32_t)(bin_info->slab_size
/ bin_info->reg_size);
bin_info->n_shards = bin_shard_sizes[i];
bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
bin_info->nregs);

View file

@ -10,7 +10,7 @@
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
unsigned i;
size_t group_count;
size_t group_count;
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
@ -24,11 +24,11 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
group_count = BITMAP_BITS2GROUPS(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
binfo->levels[i].group_offset =
binfo->levels[i - 1].group_offset + group_count;
group_count = BITMAP_BITS2GROUPS(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
binfo->levels[i].group_offset = binfo->levels[i - 1].group_offset
+ group_count;
assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
binfo->nlevels = i;
@ -42,7 +42,7 @@ bitmap_info_ngroups(const bitmap_info_t *binfo) {
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
size_t extra;
size_t extra;
unsigned i;
/*
@ -69,12 +69,13 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
}
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
size_t group_count = binfo->levels[i].group_offset
- binfo->levels[i - 1].group_offset;
extra = (BITMAP_GROUP_NBITS
- (group_count & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0) {
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
bitmap[binfo->levels[i + 1].group_offset - 1] >>= extra;
}
}
}

View file

@ -43,8 +43,9 @@ buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb,
if (write_cb != NULL) {
buf_writer->write_cb = write_cb;
} else {
buf_writer->write_cb = je_malloc_message != NULL ?
je_malloc_message : wrtmessage;
buf_writer->write_cb = je_malloc_message != NULL
? je_malloc_message
: wrtmessage;
}
buf_writer->cbopaque = cbopaque;
assert(buf_len >= 2);
@ -52,8 +53,8 @@ buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb,
buf_writer->buf = buf;
buf_writer->internal_buf = false;
} else {
buf_writer->buf = buf_writer_allocate_internal_buf(tsdn,
buf_len);
buf_writer->buf = buf_writer_allocate_internal_buf(
tsdn, buf_len);
buf_writer->internal_buf = true;
}
if (buf_writer->buf != NULL) {
@ -111,13 +112,13 @@ buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer) {
}
void
buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
void *read_cbopaque) {
buf_writer_pipe(
buf_writer_t *buf_writer, read_cb_t *read_cb, void *read_cbopaque) {
/*
* A tiny local buffer in case the buffered writer failed to allocate
* at init.
*/
static char backup_buf[16];
static char backup_buf[16];
static buf_writer_t backup_buf_writer;
buf_writer_assert(buf_writer);

View file

@ -8,8 +8,7 @@
const uintptr_t disabled_bin = JUNK_ADDR;
void
cache_bin_info_init(cache_bin_info_t *info,
cache_bin_sz_t ncached_max) {
cache_bin_info_init(cache_bin_info_t *info, cache_bin_sz_t ncached_max) {
assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
size_t stack_size = (size_t)ncached_max * sizeof(void *);
assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
@ -51,27 +50,26 @@ cache_bin_info_compute_alloc(const cache_bin_info_t *infos, szind_t ninfos,
}
void
cache_bin_preincrement(const cache_bin_info_t *infos, szind_t ninfos, void *alloc,
size_t *cur_offset) {
cache_bin_preincrement(const cache_bin_info_t *infos, szind_t ninfos,
void *alloc, size_t *cur_offset) {
if (config_debug) {
size_t computed_size;
size_t computed_alignment;
/* Pointer should be as aligned as we asked for. */
cache_bin_info_compute_alloc(infos, ninfos, &computed_size,
&computed_alignment);
cache_bin_info_compute_alloc(
infos, ninfos, &computed_size, &computed_alignment);
assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0);
}
*(uintptr_t *)((byte_t *)alloc + *cur_offset) =
cache_bin_preceding_junk;
*(uintptr_t *)((byte_t *)alloc
+ *cur_offset) = cache_bin_preceding_junk;
*cur_offset += sizeof(void *);
}
void
cache_bin_postincrement(void *alloc, size_t *cur_offset) {
*(uintptr_t *)((byte_t *)alloc + *cur_offset) =
cache_bin_trailing_junk;
*(uintptr_t *)((byte_t *)alloc + *cur_offset) = cache_bin_trailing_junk;
*cur_offset += sizeof(void *);
}
@ -83,8 +81,8 @@ cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc,
* will access the slots toward higher addresses (for the benefit of
* adjacent prefetch).
*/
void *stack_cur = (void *)((byte_t *)alloc + *cur_offset);
void *full_position = stack_cur;
void *stack_cur = (void *)((byte_t *)alloc + *cur_offset);
void *full_position = stack_cur;
cache_bin_sz_t bin_stack_size = info->ncached_max * sizeof(void *);
*cur_offset += bin_stack_size;
@ -96,8 +94,8 @@ cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc,
bin->low_bits_full = (cache_bin_sz_t)(uintptr_t)full_position;
bin->low_bits_empty = (cache_bin_sz_t)(uintptr_t)empty_position;
cache_bin_info_init(&bin->bin_info, info->ncached_max);
cache_bin_sz_t free_spots = cache_bin_diff(bin,
bin->low_bits_full, (cache_bin_sz_t)(uintptr_t)bin->stack_head);
cache_bin_sz_t free_spots = cache_bin_diff(bin, bin->low_bits_full,
(cache_bin_sz_t)(uintptr_t)bin->stack_head);
assert(free_spots == bin_stack_size);
if (!cache_bin_disabled(bin)) {
assert(cache_bin_ncached_get_local(bin) == 0);
@ -109,8 +107,8 @@ cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc,
void
cache_bin_init_disabled(cache_bin_t *bin, cache_bin_sz_t ncached_max) {
const void *fake_stack = cache_bin_disabled_bin_stack();
size_t fake_offset = 0;
const void *fake_stack = cache_bin_disabled_bin_stack();
size_t fake_offset = 0;
cache_bin_info_t fake_info;
cache_bin_info_init(&fake_info, 0);
cache_bin_init(bin, &fake_info, (void *)fake_stack, &fake_offset);

101
src/ckh.c
View file

@ -49,8 +49,8 @@
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
/******************************************************************************/
@ -60,7 +60,7 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
*/
static size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
ckhc_t *cell;
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
@ -98,20 +98,20 @@ ckh_isearch(ckh_t *ckh, const void *key) {
}
static bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data) {
ckhc_t *cell;
ckh_try_bucket_insert(
ckh_t *ckh, size_t bucket, const void *key, const void *data) {
ckhc_t *cell;
unsigned offset, i;
/*
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
offset = (unsigned)prng_lg_range_u64(
&ckh->prng_state, LG_CKH_BUCKET_CELLS);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS)
+ ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
if (cell->key == NULL) {
cell->key = key;
cell->data = data;
@ -130,12 +130,12 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* eviction/relocation bucket cycle.
*/
static bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata) {
ckh_evict_reloc_insert(
ckh_t *ckh, size_t argbucket, void const **argkey, void const **argdata) {
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
unsigned i;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
unsigned i;
bucket = argbucket;
key = *argkey;
@ -149,15 +149,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same
* bucket.
*/
i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
i = (unsigned)prng_lg_range_u64(
&ckh->prng_state, LG_CKH_BUCKET_CELLS);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
/* Swap cell->{key,data} and {key,data} (evict). */
tkey = cell->key; tdata = cell->data;
cell->key = key; cell->data = data;
key = tkey; data = tdata;
tkey = cell->key;
tdata = cell->data;
cell->key = key;
cell->data = data;
key = tkey;
data = tdata;
#ifdef CKH_COUNT
ckh->nrelocs++;
@ -167,8 +170,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
ckh->hash(key, hashes);
tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (tbucket == bucket) {
tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
- 1);
tbucket = hashes[0]
& ((ZU(1) << ckh->lg_curbuckets) - 1);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
@ -201,8 +204,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
}
static bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
size_t hashes[2], bucket;
ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata) {
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
@ -232,7 +235,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
*/
static bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
size_t count, i, nins;
size_t count, i, nins;
const void *key, *data;
count = ckh->count;
@ -254,8 +257,8 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
static bool
ckh_grow(tsd_t *tsd, ckh_t *ckh) {
bool ret;
ckhc_t *tab, *ttab;
bool ret;
ckhc_t *tab, *ttab;
unsigned lg_prevbuckets, lg_curcells;
#ifdef CKH_COUNT
@ -274,8 +277,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) {
lg_curcells++;
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0
|| usize > SC_LARGE_MAXCLASS)) {
if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
@ -309,8 +311,8 @@ label_return:
static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
ckhc_t *tab, *ttab;
size_t usize;
ckhc_t *tab, *ttab;
size_t usize;
unsigned lg_prevbuckets, lg_curcells;
/*
@ -358,8 +360,8 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
bool
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
ckh_keycomp_t *keycomp) {
bool ret;
size_t mincells, usize;
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
assert(minitems > 0);
@ -386,8 +388,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
assert(LG_CKH_BUCKET_CELLS > 0);
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
lg_mincells++) {
(ZU(1) << lg_mincells) < mincells; lg_mincells++) {
/* Do nothing. */
}
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
@ -417,11 +418,12 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh) {
assert(ckh != NULL);
#ifdef CKH_VERBOSE
malloc_printf(
"%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
" nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
" nrelocs: %"FMTu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
malloc_printf("%s(%p): ngrows: %" FMTu64 ", nshrinks: %" FMTu64
","
" nshrinkfails: %" FMTu64 ", ninserts: %" FMTu64
","
" nrelocs: %" FMTu64 "\n",
__func__, ckh, (unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
(unsigned long long)ckh->ninserts,
@ -445,8 +447,9 @@ bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
for (i = *tabind,
ncells = (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS));
i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
if (key != NULL) {
*key = (void *)ckh->tab[i].key;
@ -486,8 +489,8 @@ label_return:
}
bool
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data) {
ckh_remove(
tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data) {
size_t cell;
assert(ckh != NULL);
@ -505,9 +508,9 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
ckh->count--;
/* Try to halve the table if it is less than 1/4 full. */
if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
if (ckh->count < (ZU(1)
<< (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 2))
&& ckh->lg_curbuckets > ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
ckh_shrink(tsd, ckh);
}
@ -554,8 +557,8 @@ ckh_string_keycomp(const void *k1, const void *k2) {
void
ckh_pointer_hash(const void *key, size_t r_hash[2]) {
union {
const void *v;
size_t i;
const void *v;
size_t i;
} u;
assert(sizeof(u.v) == sizeof(u.i));

View file

@ -6,7 +6,7 @@
bool
counter_accum_init(counter_accum_t *counter, uint64_t interval) {
if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum",
WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
return true;
}
locked_init_u64_unsynchronized(&counter->accumbytes, 0);

2262
src/ctl.c

File diff suppressed because it is too large Load diff

View file

@ -4,9 +4,8 @@
#include "jemalloc/internal/decay.h"
static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#define STEP(step, h, x, y) h,
SMOOTHSTEP
#undef STEP
};
@ -21,8 +20,9 @@ decay_deadline_init(decay_t *decay) {
if (decay_ms_read(decay) > 0) {
nstime_t jitter;
nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
nstime_ns(&decay->interval)));
nstime_init(&jitter,
prng_range_u64(
&decay->jitter_state, nstime_ns(&decay->interval)));
nstime_add(&decay->deadline, &jitter);
}
}
@ -31,8 +31,8 @@ void
decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
if (decay_ms > 0) {
nstime_init(&decay->interval, (uint64_t)decay_ms *
KQU(1000000));
nstime_init(
&decay->interval, (uint64_t)decay_ms * KQU(1000000));
nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
}
@ -52,7 +52,7 @@ decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
decay->ceil_npages = 0;
}
if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
malloc_mutex_rank_exclusive)) {
malloc_mutex_rank_exclusive)) {
return true;
}
decay->purging = false;
@ -65,8 +65,8 @@ decay_ms_valid(ssize_t decay_ms) {
if (decay_ms < -1) {
return false;
}
if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
KQU(1000)) {
if (decay_ms == -1
|| (uint64_t)decay_ms <= NSTIME_SEC_MAX * KQU(1000)) {
return true;
}
return false;
@ -74,8 +74,8 @@ decay_ms_valid(ssize_t decay_ms) {
static void
decay_maybe_update_time(decay_t *decay, nstime_t *new_time) {
if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch,
new_time) > 0)) {
if (unlikely(!nstime_monotonic()
&& nstime_compare(&decay->epoch, new_time) > 0)) {
/*
* Time went backwards. Move the epoch back in time and
* generate a new deadline, with the expectation that time
@ -115,11 +115,11 @@ decay_backlog_npages_limit(const decay_t *decay) {
* placed as the newest record.
*/
static void
decay_backlog_update(decay_t *decay, uint64_t nadvance_u64,
size_t current_npages) {
decay_backlog_update(
decay_t *decay, uint64_t nadvance_u64, size_t current_npages) {
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
memset(decay->backlog, 0,
(SMOOTHSTEP_NSTEPS - 1) * sizeof(size_t));
} else {
size_t nadvance_z = (size_t)nadvance_u64;
@ -128,14 +128,15 @@ decay_backlog_update(decay_t *decay, uint64_t nadvance_u64,
memmove(decay->backlog, &decay->backlog[nadvance_z],
(SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
if (nadvance_z > 1) {
memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
memset(&decay->backlog[SMOOTHSTEP_NSTEPS - nadvance_z],
0, (nadvance_z - 1) * sizeof(size_t));
}
}
size_t npages_delta = (current_npages > decay->nunpurged) ?
current_npages - decay->nunpurged : 0;
decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
size_t npages_delta = (current_npages > decay->nunpurged)
? current_npages - decay->nunpurged
: 0;
decay->backlog[SMOOTHSTEP_NSTEPS - 1] = npages_delta;
if (config_debug) {
if (current_npages > decay->ceil_npages) {
@ -165,18 +166,17 @@ decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) {
npages_purge = npages_new;
} else {
uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
assert(h_steps_max >=
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npages_purge = npages_new * (h_steps_max -
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
assert(h_steps_max >= h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npages_purge = npages_new
* (h_steps_max - h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npages_purge >>= SMOOTHSTEP_BFP;
}
return npages_purge;
}
bool
decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
size_t npages_current) {
decay_maybe_advance_epoch(
decay_t *decay, nstime_t *new_time, size_t npages_current) {
/* Handle possible non-monotonicity of time. */
decay_maybe_update_time(decay, new_time);
@ -202,8 +202,9 @@ decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
decay_backlog_update(decay, nadvance_u64, npages_current);
decay->npages_limit = decay_backlog_npages_limit(decay);
decay->nunpurged = (decay->npages_limit > npages_current) ?
decay->npages_limit : npages_current;
decay->nunpurged = (decay->npages_limit > npages_current)
? decay->npages_limit
: npages_current;
return true;
}
@ -226,21 +227,21 @@ decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
*/
static inline size_t
decay_npurge_after_interval(decay_t *decay, size_t interval) {
size_t i;
size_t i;
uint64_t sum = 0;
for (i = 0; i < interval; i++) {
sum += decay->backlog[i] * h_steps[i];
}
for (; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] *
(h_steps[i] - h_steps[i - interval]);
sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
}
return (size_t)(sum >> SMOOTHSTEP_BFP);
}
uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
uint64_t npages_threshold) {
uint64_t
decay_ns_until_purge(
decay_t *decay, size_t npages_current, uint64_t npages_threshold) {
if (!decay_gradually(decay)) {
return DECAY_UNBOUNDED_TIME_TO_PURGE;
}
@ -278,7 +279,7 @@ uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
}
unsigned n_search = 0;
size_t target, npurge;
size_t target, npurge;
while ((npurge_lb + npages_threshold < npurge_ub) && (lb + 2 < ub)) {
target = (lb + ub) / 2;
npurge = decay_npurge_after_interval(decay, target);

View file

@ -7,7 +7,7 @@ bool
ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
bool delay_coalesce) {
if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS,
malloc_mutex_rank_exclusive)) {
malloc_mutex_rank_exclusive)) {
return true;
}
ecache->state = state;

View file

@ -1,6 +1,5 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
ph_gen(, edata_avail, edata_t, avail_link,
edata_esnead_comp)
ph_gen(, edata_heap, edata_t, heap_link, edata_snad_comp)
ph_gen(, edata_avail, edata_t, avail_link, edata_esnead_comp)
ph_gen(, edata_heap, edata_t, heap_link, edata_snad_comp)

View file

@ -11,7 +11,7 @@ edata_cache_init(edata_cache_t *edata_cache, base_t *base) {
*/
atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);
if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",
WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
return true;
}
edata_cache->base = base;
@ -63,8 +63,7 @@ edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {
}
static void
edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
edata_cache_fast_t *ecs) {
edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
edata_t *edata;
malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
@ -80,8 +79,8 @@ edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
edata_t *
edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_EDATA_CACHE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_EDATA_CACHE, 0);
if (ecs->disabled) {
assert(edata_list_inactive_first(&ecs->list) == NULL);
@ -118,7 +117,7 @@ edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
* flush and disable pathways.
*/
edata_t *edata;
size_t nflushed = 0;
size_t nflushed = 0;
malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) {
edata_list_inactive_remove(&ecs->list, edata);
@ -131,8 +130,8 @@ edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
void
edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_EDATA_CACHE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_EDATA_CACHE, 0);
if (ecs->disabled) {
assert(edata_list_inactive_first(&ecs->list) == NULL);

View file

@ -27,9 +27,10 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
assert(alignment != 0);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
if (have_dss && dss_prec == dss_prec_primary
&& (ret = extent_alloc_dss(
tsdn, arena, new_addr, size, alignment, zero, commit))
!= NULL) {
return ret;
}
/* mmap. */
@ -38,9 +39,10 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
return ret;
}
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
if (have_dss && dss_prec == dss_prec_secondary
&& (ret = extent_alloc_dss(
tsdn, arena, new_addr, size, alignment, zero, commit))
!= NULL) {
return ret;
}
@ -54,10 +56,11 @@ ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
arena_t *arena = arena_get(tsdn, arena_ind, false);
/* NULL arena indicates arena_create. */
assert(arena != NULL || alignment == BASE_BLOCK_MIN_ALIGN);
dss_prec_t dss = (arena == NULL) ? dss_prec_disabled :
(dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
zero, commit, dss);
dss_prec_t dss = (arena == NULL)
? dss_prec_disabled
: (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
void *ret = extent_alloc_core(
tsdn, arena, new_addr, size, alignment, zero, commit, dss);
if (have_madvise_huge && ret) {
pages_set_thp_state(ret, size);
}
@ -100,8 +103,8 @@ ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool
ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
return pages_commit((void *)((byte_t *)addr + (uintptr_t)offset),
length);
return pages_commit(
(void *)((byte_t *)addr + (uintptr_t)offset), length);
}
static bool
@ -112,8 +115,8 @@ ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool
ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
return pages_decommit((void *)((byte_t *)addr + (uintptr_t)offset),
length);
return pages_decommit(
(void *)((byte_t *)addr + (uintptr_t)offset), length);
}
static bool
@ -125,8 +128,8 @@ ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
#ifdef PAGES_CAN_PURGE_LAZY
bool
ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
return pages_purge_lazy((void *)((byte_t *)addr + (uintptr_t)offset),
length);
return pages_purge_lazy(
(void *)((byte_t *)addr + (uintptr_t)offset), length);
}
static bool
@ -143,8 +146,8 @@ ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
#ifdef PAGES_CAN_PURGE_FORCED
bool
ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
return pages_purge_forced((void *)((byte_t *)addr +
(uintptr_t)offset), length);
return pages_purge_forced(
(void *)((byte_t *)addr + (uintptr_t)offset), length);
}
static bool
@ -201,11 +204,11 @@ ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) {
return true;
}
if (config_debug) {
edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global,
addr_a);
bool head_a = edata_is_head_get(a);
edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global,
addr_b);
edata_t *a = emap_edata_lookup(
tsdn, &arena_emap_global, addr_a);
bool head_a = edata_is_head_get(a);
edata_t *b = emap_edata_lookup(
tsdn, &arena_emap_global, addr_b);
bool head_b = edata_is_head_get(b);
emap_assert_mapped(tsdn, &arena_emap_global, a);
emap_assert_mapped(tsdn, &arena_emap_global, b);
@ -254,22 +257,17 @@ ehooks_default_unguard_impl(void *guard1, void *guard2) {
pages_unmark_guards(guard1, guard2);
}
const extent_hooks_t ehooks_default_extent_hooks = {
ehooks_default_alloc,
ehooks_default_dalloc,
ehooks_default_destroy,
ehooks_default_commit,
ehooks_default_decommit,
const extent_hooks_t ehooks_default_extent_hooks = {ehooks_default_alloc,
ehooks_default_dalloc, ehooks_default_destroy, ehooks_default_commit,
ehooks_default_decommit,
#ifdef PAGES_CAN_PURGE_LAZY
ehooks_default_purge_lazy,
ehooks_default_purge_lazy,
#else
NULL,
NULL,
#endif
#ifdef PAGES_CAN_PURGE_FORCED
ehooks_default_purge_forced,
ehooks_default_purge_forced,
#else
NULL,
NULL,
#endif
ehooks_default_split,
ehooks_default_merge
};
ehooks_default_split, ehooks_default_merge};

View file

@ -16,10 +16,10 @@ emap_init(emap_t *emap, base_t *base, bool zeroed) {
}
void
emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t state) {
witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE);
emap_update_edata_state(
tsdn_t *tsdn, emap_t *emap, edata_t *edata, extent_state_t state) {
witness_assert_positive_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
edata_state_set(edata, state);
@ -28,10 +28,11 @@ emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
/* init_missing */ false);
assert(elm1 != NULL);
rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL :
rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_last_get(edata), /* dependent */ true,
/* init_missing */ false);
rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE
? NULL
: rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_last_get(edata), /* dependent */ true,
/* init_missing */ false);
rtree_leaf_elm_state_update(tsdn, &emap->rtree, elm1, elm2, state);
@ -42,17 +43,17 @@ static inline edata_t *
emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_pai_t pai, extent_state_t expected_state, bool forward,
bool expanding) {
witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE);
witness_assert_positive_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
assert(!edata_guarded_get(edata));
assert(!expanding || forward);
assert(!edata_state_in_transition(expected_state));
assert(expected_state == extent_state_dirty ||
expected_state == extent_state_muzzy ||
expected_state == extent_state_retained);
assert(expected_state == extent_state_dirty
|| expected_state == extent_state_muzzy
|| expected_state == extent_state_retained);
void *neighbor_addr = forward ? edata_past_get(edata) :
edata_before_get(edata);
void *neighbor_addr = forward ? edata_past_get(edata)
: edata_before_get(edata);
/*
* This is subtle; the rtree code asserts that its input pointer is
* non-NULL, and this is a useful thing to check. But it's possible
@ -73,10 +74,10 @@ emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
return NULL;
}
rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn,
&emap->rtree, elm, /* dependent */ false);
rtree_contents_t neighbor_contents = rtree_leaf_elm_read(
tsdn, &emap->rtree, elm, /* dependent */ false);
if (!extent_can_acquire_neighbor(edata, neighbor_contents, pai,
expected_state, forward, expanding)) {
expected_state, forward, expanding)) {
return NULL;
}
@ -109,8 +110,8 @@ emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
}
void
emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t new_state) {
emap_release_edata(
tsdn_t *tsdn, emap_t *emap, edata_t *edata, extent_state_t new_state) {
assert(emap_edata_in_transition(tsdn, emap, edata));
assert(emap_edata_is_acquired(tsdn, emap, edata));
@ -145,8 +146,8 @@ emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a,
contents.edata = edata;
contents.metadata.szind = szind;
contents.metadata.slab = slab;
contents.metadata.is_head = (edata == NULL) ? false :
edata_is_head_get(edata);
contents.metadata.is_head = (edata == NULL) ? false
: edata_is_head_get(edata);
contents.metadata.state = (edata == NULL) ? 0 : edata_state_get(edata);
rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents);
if (elm_b != NULL) {
@ -155,29 +156,33 @@ emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a,
}
bool
emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind, bool slab) {
emap_register_boundary(
tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, bool slab) {
assert(edata_state_get(edata) == extent_state_active);
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm_a, *elm_b;
bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
false, true, &elm_a, &elm_b);
bool err = emap_rtree_leaf_elms_lookup(
tsdn, emap, rtree_ctx, edata, false, true, &elm_a, &elm_b);
if (err) {
return true;
}
assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
/* dependent */ false).edata == NULL);
/* dependent */ false)
.edata
== NULL);
assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
/* dependent */ false).edata == NULL);
/* dependent */ false)
.edata
== NULL);
emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, edata, szind, slab);
return false;
}
/* Invoked *after* emap_register_boundary. */
void
emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind) {
emap_register_interior(
tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind) {
EMAP_DECLARE_RTREE_CTX;
assert(edata_slab_get(edata));
@ -226,10 +231,10 @@ emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm_a, *elm_b;
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
true, false, &elm_a, &elm_b);
emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES,
false);
emap_rtree_leaf_elms_lookup(
tsdn, emap, rtree_ctx, edata, true, false, &elm_a, &elm_b);
emap_rtree_write_acquired(
tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES, false);
}
void
@ -245,8 +250,8 @@ emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
}
void
emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
bool slab) {
emap_remap(
tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, bool slab) {
EMAP_DECLARE_RTREE_CTX;
if (szind != SC_NSIZES) {
@ -274,8 +279,8 @@ emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
if (slab && edata_size_get(edata) > PAGE) {
uintptr_t key = (uintptr_t)edata_past_get(edata)
- (uintptr_t)PAGE;
rtree_write(tsdn, &emap->rtree, rtree_ctx, key,
contents);
rtree_write(
tsdn, &emap->rtree, rtree_ctx, key, contents);
}
}
}
@ -344,29 +349,29 @@ emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
clear_contents.metadata.state = (extent_state_t)0;
if (prepare->lead_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree,
prepare->lead_elm_b, clear_contents);
rtree_leaf_elm_write(
tsdn, &emap->rtree, prepare->lead_elm_b, clear_contents);
}
rtree_leaf_elm_t *merged_b;
if (prepare->trail_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree,
prepare->trail_elm_a, clear_contents);
rtree_leaf_elm_write(
tsdn, &emap->rtree, prepare->trail_elm_a, clear_contents);
merged_b = prepare->trail_elm_b;
} else {
merged_b = prepare->trail_elm_a;
}
emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, merged_b,
lead, SC_NSIZES, false);
emap_rtree_write_acquired(
tsdn, emap, prepare->lead_elm_a, merged_b, lead, SC_NSIZES, false);
}
void
emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata));
rtree_contents_t contents = rtree_read(
tsdn, &emap->rtree, rtree_ctx, (uintptr_t)edata_base_get(edata));
assert(contents.edata == edata);
assert(contents.metadata.is_head == edata_is_head_get(edata));
assert(contents.metadata.state == edata_state_get(edata));
@ -375,12 +380,12 @@ emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
void
emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
emap_full_alloc_ctx_t context1 = {0};
emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_base_get(edata),
&context1);
emap_full_alloc_ctx_try_lookup(
tsdn, emap, edata_base_get(edata), &context1);
assert(context1.edata == NULL);
emap_full_alloc_ctx_t context2 = {0};
emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_last_get(edata),
&context2);
emap_full_alloc_ctx_try_lookup(
tsdn, emap, edata_last_get(edata), &context2);
assert(context2.edata == NULL);
}

View file

@ -48,32 +48,32 @@ eset_nbytes_get(eset_t *eset, pszind_t pind) {
static void
eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nextents, cur + 1,
ATOMIC_RELAXED);
size_t cur = atomic_load_zu(
&eset->bin_stats[pind].nextents, ATOMIC_RELAXED);
atomic_store_zu(
&eset->bin_stats[pind].nextents, cur + 1, ATOMIC_RELAXED);
cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nbytes, cur + sz,
ATOMIC_RELAXED);
atomic_store_zu(
&eset->bin_stats[pind].nbytes, cur + sz, ATOMIC_RELAXED);
}
static void
eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nextents, cur - 1,
ATOMIC_RELAXED);
size_t cur = atomic_load_zu(
&eset->bin_stats[pind].nextents, ATOMIC_RELAXED);
atomic_store_zu(
&eset->bin_stats[pind].nextents, cur - 1, ATOMIC_RELAXED);
cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nbytes, cur - sz,
ATOMIC_RELAXED);
atomic_store_zu(
&eset->bin_stats[pind].nbytes, cur - sz, ATOMIC_RELAXED);
}
void
eset_insert(eset_t *eset, edata_t *edata) {
assert(edata_state_get(edata) == eset->state);
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
@ -86,8 +86,9 @@ eset_insert(eset_t *eset, edata_t *edata) {
* There's already a min element; update the summary if we're
* about to insert a lower one.
*/
if (edata_cmp_summary_comp(edata_cmp_summary,
eset->bins[pind].heap_min) < 0) {
if (edata_cmp_summary_comp(
edata_cmp_summary, eset->bins[pind].heap_min)
< 0) {
eset->bins[pind].heap_min = edata_cmp_summary;
}
}
@ -104,19 +105,18 @@ eset_insert(eset_t *eset, edata_t *edata) {
* don't need an atomic fetch-add; we can get by with a load followed by
* a store.
*/
size_t cur_eset_npages =
atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
atomic_store_zu(&eset->npages, cur_eset_npages + npages,
ATOMIC_RELAXED);
size_t cur_eset_npages = atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
atomic_store_zu(
&eset->npages, cur_eset_npages + npages, ATOMIC_RELAXED);
}
void
eset_remove(eset_t *eset, edata_t *edata) {
assert(edata_state_get(edata) == eset->state ||
edata_state_in_transition(edata_state_get(edata)));
assert(edata_state_get(edata) == eset->state
|| edata_state_in_transition(edata_state_get(edata)));
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
if (config_stats) {
eset_stats_sub(eset, pind, size);
@ -136,8 +136,9 @@ eset_remove(eset_t *eset, edata_t *edata) {
* summaries of the removed element and the min element should
* compare equal.
*/
if (edata_cmp_summary_comp(edata_cmp_summary,
eset->bins[pind].heap_min) == 0) {
if (edata_cmp_summary_comp(
edata_cmp_summary, eset->bins[pind].heap_min)
== 0) {
eset->bins[pind].heap_min = edata_cmp_summary_get(
edata_heap_first(&eset->bins[pind].heap));
}
@ -148,35 +149,35 @@ eset_remove(eset_t *eset, edata_t *edata) {
* As in eset_insert, we hold eset->mtx and so don't need atomic
* operations for updating eset->npages.
*/
size_t cur_extents_npages =
atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
size_t cur_extents_npages = atomic_load_zu(
&eset->npages, ATOMIC_RELAXED);
assert(cur_extents_npages >= npages);
atomic_store_zu(&eset->npages,
cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
atomic_store_zu(&eset->npages, cur_extents_npages - (size >> LG_PAGE),
ATOMIC_RELAXED);
}
static edata_t *
eset_enumerate_alignment_search(eset_t *eset, size_t size, pszind_t bin_ind,
size_t alignment) {
eset_enumerate_alignment_search(
eset_t *eset, size_t size, pszind_t bin_ind, size_t alignment) {
if (edata_heap_empty(&eset->bins[bin_ind].heap)) {
return NULL;
}
edata_t *edata = NULL;
edata_t *edata = NULL;
edata_heap_enumerate_helper_t helper;
edata_heap_enumerate_prepare(&eset->bins[bin_ind].heap, &helper,
ESET_ENUMERATE_MAX_NUM, sizeof(helper.bfs_queue)/sizeof(void *));
while ((edata =
edata_heap_enumerate_next(&eset->bins[bin_ind].heap, &helper)) !=
NULL) {
ESET_ENUMERATE_MAX_NUM, sizeof(helper.bfs_queue) / sizeof(void *));
while ((edata = edata_heap_enumerate_next(
&eset->bins[bin_ind].heap, &helper))
!= NULL) {
uintptr_t base = (uintptr_t)edata_base_get(edata);
size_t candidate_size = edata_size_get(edata);
size_t candidate_size = edata_size_get(edata);
if (candidate_size < size) {
continue;
}
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
uintptr_t next_align = ALIGNMENT_CEILING(
(uintptr_t)base, PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow or not crossing the next alignment. */
continue;
@ -198,19 +199,20 @@ eset_enumerate_search(eset_t *eset, size_t size, pszind_t bin_ind,
return NULL;
}
edata_t *ret = NULL, *edata = NULL;
edata_t *ret = NULL, *edata = NULL;
edata_heap_enumerate_helper_t helper;
edata_heap_enumerate_prepare(&eset->bins[bin_ind].heap, &helper,
ESET_ENUMERATE_MAX_NUM, sizeof(helper.bfs_queue)/sizeof(void *));
while ((edata =
edata_heap_enumerate_next(&eset->bins[bin_ind].heap, &helper)) !=
NULL) {
if ((!exact_only && edata_size_get(edata) >= size) ||
(exact_only && edata_size_get(edata) == size)) {
edata_cmp_summary_t temp_summ =
edata_cmp_summary_get(edata);
if (ret == NULL || edata_cmp_summary_comp(temp_summ,
*ret_summ) < 0) {
ESET_ENUMERATE_MAX_NUM, sizeof(helper.bfs_queue) / sizeof(void *));
while ((edata = edata_heap_enumerate_next(
&eset->bins[bin_ind].heap, &helper))
!= NULL) {
if ((!exact_only && edata_size_get(edata) >= size)
|| (exact_only && edata_size_get(edata) == size)) {
edata_cmp_summary_t temp_summ = edata_cmp_summary_get(
edata);
if (ret == NULL
|| edata_cmp_summary_comp(temp_summ, *ret_summ)
< 0) {
ret = edata;
*ret_summ = temp_summ;
}
@ -225,8 +227,8 @@ eset_enumerate_search(eset_t *eset, size_t size, pszind_t bin_ind,
* requirement. For each size, try only the first extent in the heap.
*/
static edata_t *
eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t alignment) {
eset_fit_alignment(
eset_t *eset, size_t min_size, size_t max_size, size_t alignment) {
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));
@ -234,26 +236,26 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
pszind_t pind_prev = sz_psz2ind(sz_psz_quantize_floor(min_size));
if (sz_large_size_classes_disabled() && pind != pind_prev) {
edata_t *ret = NULL;
ret = eset_enumerate_alignment_search(eset, min_size, pind_prev,
alignment);
ret = eset_enumerate_alignment_search(
eset, min_size, pind_prev, alignment);
if (ret != NULL) {
return ret;
}
}
for (pszind_t i =
(pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
i < pind_max;
i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
(pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
i < pind_max;
i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
assert(i < SC_NPSIZES);
assert(!edata_heap_empty(&eset->bins[i].heap));
edata_t *edata = edata_heap_first(&eset->bins[i].heap);
edata_t *edata = edata_heap_first(&eset->bins[i].heap);
uintptr_t base = (uintptr_t)edata_base_get(edata);
size_t candidate_size = edata_size_get(edata);
size_t candidate_size = edata_size_get(edata);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
uintptr_t next_align = ALIGNMENT_CEILING(
(uintptr_t)base, PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow or not crossing the next alignment. */
continue;
@ -279,22 +281,23 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
* for others.
*/
static edata_t *
eset_first_fit(eset_t *eset, size_t size, bool exact_only,
unsigned lg_max_fit) {
edata_t *ret = NULL;
eset_first_fit(
eset_t *eset, size_t size, bool exact_only, unsigned lg_max_fit) {
edata_t *ret = NULL;
edata_cmp_summary_t ret_summ JEMALLOC_CC_SILENCE_INIT({0});
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
if (exact_only) {
if (sz_large_size_classes_disabled()) {
pszind_t pind_prev =
sz_psz2ind(sz_psz_quantize_floor(size));
pszind_t pind_prev = sz_psz2ind(
sz_psz_quantize_floor(size));
return eset_enumerate_search(eset, size, pind_prev,
/* exact_only */ true, &ret_summ);
} else {
return edata_heap_empty(&eset->bins[pind].heap) ? NULL:
edata_heap_first(&eset->bins[pind].heap);
return edata_heap_empty(&eset->bins[pind].heap)
? NULL
: edata_heap_first(&eset->bins[pind].heap);
}
}
@ -321,15 +324,15 @@ eset_first_fit(eset_t *eset, size_t size, bool exact_only,
* usize and thus should be enumerated.
*/
pszind_t pind_prev = sz_psz2ind(sz_psz_quantize_floor(size));
if (sz_large_size_classes_disabled() && pind != pind_prev){
if (sz_large_size_classes_disabled() && pind != pind_prev) {
ret = eset_enumerate_search(eset, size, pind_prev,
/* exact_only */ false, &ret_summ);
}
for (pszind_t i =
(pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
i < ESET_NPSIZES;
i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
(pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
i < ESET_NPSIZES;
i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
assert(!edata_heap_empty(&eset->bins[i].heap));
if (lg_max_fit == SC_PTR_BITS) {
/*
@ -342,8 +345,9 @@ eset_first_fit(eset_t *eset, size_t size, bool exact_only,
if ((sz_pind2sz(i) >> lg_max_fit) > size) {
break;
}
if (ret == NULL || edata_cmp_summary_comp(
eset->bins[i].heap_min, ret_summ) < 0) {
if (ret == NULL
|| edata_cmp_summary_comp(eset->bins[i].heap_min, ret_summ)
< 0) {
/*
* We grab the edata as early as possible, even though
* we might change it later. Practically, a large
@ -354,9 +358,10 @@ eset_first_fit(eset_t *eset, size_t size, bool exact_only,
edata_t *edata = edata_heap_first(&eset->bins[i].heap);
assert(edata_size_get(edata) >= size);
assert(ret == NULL || edata_snad_comp(edata, ret) < 0);
assert(ret == NULL || edata_cmp_summary_comp(
eset->bins[i].heap_min,
edata_cmp_summary_get(edata)) == 0);
assert(ret == NULL
|| edata_cmp_summary_comp(eset->bins[i].heap_min,
edata_cmp_summary_get(edata))
== 0);
ret = edata;
ret_summ = eset->bins[i].heap_min;
}

View file

@ -19,7 +19,7 @@ size_t opt_process_madvise_max_batch =
#else
0
#endif
;
;
static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length, bool growing_retained);
@ -29,8 +29,8 @@ static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
edata_t *edata, size_t offset, size_t length, bool growing_retained);
static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *a, edata_t *b, bool holding_core_locks);
static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *a, edata_t *b, bool holding_core_locks);
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
@ -42,7 +42,7 @@ static atomic_zu_t highpages;
* definition.
*/
static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
bool zero, bool *commit, bool growing_retained, bool guarded);
@ -51,8 +51,8 @@ static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
bool zero, bool *commit, bool guarded);
static bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks,
edata_t *edata, size_t offset, size_t length);
static bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks,
edata_t *edata, size_t offset, size_t length);
/******************************************************************************/
@ -73,8 +73,8 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
bool coalesced;
edata = extent_try_coalesce(tsdn, pac, ehooks, ecache,
edata, &coalesced);
edata = extent_try_coalesce(
tsdn, pac, ehooks, ecache, edata, &coalesced);
emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
if (!coalesced) {
@ -90,10 +90,10 @@ ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
bool guarded) {
assert(size != 0);
assert(alignment != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
bool commit = true;
bool commit = true;
edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
size, alignment, zero, &commit, false, guarded);
assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
@ -107,10 +107,10 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
bool guarded) {
assert(size != 0);
assert(alignment != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
bool commit = true;
bool commit = true;
edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
size, alignment, zero, &commit, guarded);
if (edata == NULL) {
@ -131,10 +131,11 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
*/
return NULL;
}
void *new_addr = (expand_edata == NULL) ? NULL :
edata_past_get(expand_edata);
edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
size, alignment, zero, &commit,
void *new_addr = (expand_edata == NULL)
? NULL
: edata_past_get(expand_edata);
edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr, size,
alignment, zero, &commit,
/* growing_retained */ false);
}
@ -148,8 +149,8 @@ ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
assert(edata_base_get(edata) != NULL);
assert(edata_size_get(edata) != 0);
assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
edata_addr_set(edata, edata_base_get(edata));
edata_zeroed_set(edata, false);
@ -158,8 +159,8 @@ ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
}
edata_t *
ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, size_t npages_min) {
ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
size_t npages_min) {
malloc_mutex_lock(tsdn, &ecache->mtx);
/*
@ -194,8 +195,8 @@ ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
break;
}
/* Try to coalesce. */
if (extent_try_delayed_coalesce(tsdn, pac, ehooks, ecache,
edata)) {
if (extent_try_delayed_coalesce(
tsdn, pac, ehooks, ecache, edata)) {
break;
}
/*
@ -211,8 +212,8 @@ ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
switch (ecache->state) {
case extent_state_dirty:
case extent_state_muzzy:
emap_update_edata_state(tsdn, pac->emap, edata,
extent_state_active);
emap_update_edata_state(
tsdn, pac->emap, edata, extent_state_active);
break;
case extent_state_retained:
extent_deregister(tsdn, pac, edata);
@ -238,16 +239,16 @@ extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata, bool growing_retained) {
size_t sz = edata_size_get(edata);
if (config_stats) {
atomic_fetch_add_zu(&pac->stats->abandoned_vm, sz,
ATOMIC_RELAXED);
atomic_fetch_add_zu(
&pac->stats->abandoned_vm, sz, ATOMIC_RELAXED);
}
/*
* Leak extent after making sure its pages have already been purged, so
* that this is only a virtual memory leak.
*/
if (ecache->state == extent_state_dirty) {
if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz,
growing_retained)) {
if (extent_purge_lazy_impl(
tsdn, ehooks, edata, 0, sz, growing_retained)) {
extent_purge_forced_impl(tsdn, ehooks, edata, 0,
edata_size_get(edata), growing_retained);
}
@ -256,20 +257,20 @@ extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
}
static void
extent_deactivate_locked_impl(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
edata_t *edata) {
extent_deactivate_locked_impl(
tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &ecache->mtx);
assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset :
&ecache->eset;
eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset
: &ecache->eset;
eset_insert(eset, edata);
}
static void
extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
edata_t *edata) {
extent_deactivate_locked(
tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, edata_t *edata) {
assert(edata_state_get(edata) == extent_state_active);
extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
}
@ -282,11 +283,11 @@ extent_deactivate_check_state_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
}
static void
extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
edata_t *edata) {
extent_activate_locked(
tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset, edata_t *edata) {
assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
assert(edata_state_get(edata) == ecache->state ||
edata_state_get(edata) == extent_state_merging);
assert(edata_state_get(edata) == ecache->state
|| edata_state_get(edata) == extent_state_merging);
eset_remove(eset, edata);
emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
@ -296,16 +297,18 @@ void
extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof);
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
if (opt_prof && edata_state_get(edata) == extent_state_active) {
size_t nadd = edata_size_get(edata) >> LG_PAGE;
size_t cur = atomic_fetch_add_zu(&curpages, nadd,
ATOMIC_RELAXED) + nadd;
size_t cur = atomic_fetch_add_zu(
&curpages, nadd, ATOMIC_RELAXED)
+ nadd;
size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
while (cur > high && !atomic_compare_exchange_weak_zu(
&highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
while (cur > high
&& !atomic_compare_exchange_weak_zu(&highpages, &high, cur,
ATOMIC_RELAXED, ATOMIC_RELAXED)) {
/*
* Don't refresh cur, because it may have decreased
* since this thread lost the highpages update race.
@ -337,7 +340,7 @@ extent_register_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump_add) {
* prevents other threads from accessing the edata.
*/
if (emap_register_boundary(tsdn, pac->emap, edata, SC_NSIZES,
/* slab */ false)) {
/* slab */ false)) {
return true;
}
@ -368,8 +371,7 @@ extent_reregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
* Removes all pointers to the given extent from the global rtree.
*/
static void
extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
bool gdump) {
extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump) {
emap_deregister_boundary(tsdn, pac->emap, edata);
if (config_prof && gdump) {
@ -383,8 +385,7 @@ extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
}
static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
edata_t *edata) {
extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
extent_deregister_impl(tsdn, pac, edata, false);
}
@ -411,7 +412,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
edata_t *edata;
eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
if (expand_edata != NULL) {
edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
expand_edata, EXTENT_PAI_PAC, ecache->state);
@ -419,8 +420,8 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* NOLINTNEXTLINE(readability-suspicious-call-argument) */
extent_assert_can_expand(expand_edata, edata);
if (edata_size_get(edata) < size) {
emap_release_edata(tsdn, pac->emap, edata,
ecache->state);
emap_release_edata(
tsdn, pac->emap, edata, ecache->state);
edata = NULL;
}
}
@ -435,7 +436,8 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
* put a cap on how big an extent we can split for a request.
*/
unsigned lg_max_fit = ecache->delay_coalesce
? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
? (unsigned)opt_lg_extent_max_active_fit
: SC_PTR_BITS;
/*
* If split and merge are not allowed (Windows w/o retain), try
@ -446,8 +448,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
* allocations.
*/
bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
edata = eset_fit(eset, size, alignment, exact_only,
lg_max_fit);
edata = eset_fit(eset, size, alignment, exact_only, lg_max_fit);
}
if (edata == NULL) {
return NULL;
@ -489,10 +490,11 @@ extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* The result of splitting, in case of success. */
edata_t **edata, edata_t **lead, edata_t **trail,
/* The mess to clean up, in case of error. */
edata_t **to_leak, edata_t **to_salvage,
edata_t *expand_edata, size_t size, size_t alignment) {
edata_t **to_leak, edata_t **to_salvage, edata_t *expand_edata, size_t size,
size_t alignment) {
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata),
PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata);
PAGE_CEILING(alignment))
- (uintptr_t)edata_base_get(*edata);
assert(expand_edata == NULL || leadsize == 0);
if (edata_size_get(*edata) < leadsize + size) {
return extent_split_interior_cant_alloc;
@ -547,14 +549,14 @@ extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
assert(!edata_guarded_get(edata) || size == edata_size_get(edata));
malloc_mutex_assert_owner(tsdn, &ecache->mtx);
edata_t *lead;
edata_t *trail;
edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
edata_t *lead;
edata_t *trail;
edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
extent_split_interior_result_t result = extent_split_interior(
tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
expand_edata, size, alignment);
extent_split_interior_result_t result = extent_split_interior(tsdn, pac,
ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, expand_edata,
size, alignment);
if (!maps_coalesce && result != extent_split_interior_ok
&& !opt_retain) {
@ -615,8 +617,8 @@ extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
malloc_mutex_lock(tsdn, &ecache->mtx);
edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
expand_edata, size, alignment, guarded);
edata_t *edata = extent_recycle_extract(
tsdn, pac, ehooks, ecache, expand_edata, size, alignment, guarded);
if (edata == NULL) {
malloc_mutex_unlock(tsdn, &ecache->mtx);
return NULL;
@ -630,8 +632,8 @@ extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
}
assert(edata_state_get(edata) == extent_state_active);
if (extent_commit_zero(tsdn, ehooks, edata, *commit, zero,
growing_retained)) {
if (extent_commit_zero(
tsdn, ehooks, edata, *commit, zero, growing_retained)) {
extent_record(tsdn, pac, ehooks, ecache, edata);
return NULL;
}
@ -660,16 +662,16 @@ extent_handle_huge_arena_thp(tsdn_t *tsdn, pac_thp_t *pac_thp,
* be within the range of [0, 2 * (HUGEPAGE - 1)].
*/
void *huge_addr = HUGEPAGE_ADDR2BASE(addr);
void *huge_end = HUGEPAGE_ADDR2BASE((void *)((byte_t *)addr +
(uintptr_t)(size + HUGEPAGE - 1)));
void *huge_end = HUGEPAGE_ADDR2BASE(
(void *)((byte_t *)addr + (uintptr_t)(size + HUGEPAGE - 1)));
assert((uintptr_t)huge_end > (uintptr_t)huge_addr);
size_t huge_size = (uintptr_t)huge_end - (uintptr_t)huge_addr;
assert(huge_size <= (size + ((HUGEPAGE - 1) << 1)) &&
huge_size >= size);
assert(
huge_size <= (size + ((HUGEPAGE - 1) << 1)) && huge_size >= size);
if (opt_metadata_thp == metadata_thp_always ||
pac_thp->auto_thp_switched) {
if (opt_metadata_thp == metadata_thp_always
|| pac_thp->auto_thp_switched) {
pages_huge(huge_addr, huge_size);
} else {
assert(opt_metadata_thp == metadata_thp_auto);
@ -687,8 +689,10 @@ extent_handle_huge_arena_thp(tsdn_t *tsdn, pac_thp_t *pac_thp,
if (edata != NULL) {
edata_addr_set(edata, huge_addr);
edata_size_set(edata, huge_size);
edata_list_active_append(&pac_thp->thp_lazy_list, edata);
atomic_fetch_add_u(&pac_thp->n_thp_lazy, 1, ATOMIC_RELAXED);
edata_list_active_append(
&pac_thp->thp_lazy_list, edata);
atomic_fetch_add_u(
&pac_thp->n_thp_lazy, 1, ATOMIC_RELAXED);
}
malloc_mutex_unlock(tsdn, &pac_thp->lock);
}
@ -702,8 +706,8 @@ extent_handle_huge_arena_thp(tsdn_t *tsdn, pac_thp_t *pac_thp,
* virtual memory ranges retained by each shard.
*/
static edata_t *
extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
size_t size, size_t alignment, bool zero, bool *commit) {
extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
size_t alignment, bool zero, bool *commit) {
malloc_mutex_assert_owner(tsdn, &pac->grow_mtx);
size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
@ -715,10 +719,10 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
* Find the next extent size in the series that would be large enough to
* satisfy this request.
*/
size_t alloc_size;
size_t alloc_size;
pszind_t exp_grow_skip;
bool err = exp_grow_size_prepare(&pac->exp_grow, alloc_size_min,
&alloc_size, &exp_grow_skip);
bool err = exp_grow_size_prepare(
&pac->exp_grow, alloc_size_min, &alloc_size, &exp_grow_skip);
if (err) {
goto label_err;
}
@ -730,8 +734,8 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
bool zeroed = false;
bool committed = false;
void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed,
&committed);
void *ptr = ehooks_alloc(
tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed, &committed);
if (ptr == NULL) {
edata_cache_put(tsdn, pac->edata_cache, edata);
@ -752,23 +756,23 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
*commit = true;
}
edata_t *lead;
edata_t *trail;
edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
edata_t *lead;
edata_t *trail;
edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
extent_split_interior_result_t result = extent_split_interior(tsdn,
pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL,
size, alignment);
extent_split_interior_result_t result = extent_split_interior(tsdn, pac,
ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL, size,
alignment);
if (result == extent_split_interior_ok) {
if (lead != NULL) {
extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
lead);
extent_record(
tsdn, pac, ehooks, &pac->ecache_retained, lead);
}
if (trail != NULL) {
extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
trail);
extent_record(
tsdn, pac, ehooks, &pac->ecache_retained, trail);
}
} else {
/*
@ -792,15 +796,15 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
if (*commit && !edata_committed_get(edata)) {
if (extent_commit_impl(tsdn, ehooks, edata, 0,
edata_size_get(edata), true)) {
extent_record(tsdn, pac, ehooks,
&pac->ecache_retained, edata);
if (extent_commit_impl(
tsdn, ehooks, edata, 0, edata_size_get(edata), true)) {
extent_record(
tsdn, pac, ehooks, &pac->ecache_retained, edata);
goto label_err;
}
/* A successful commit should return zeroed memory. */
if (config_debug) {
void *addr = edata_addr_get(edata);
void *addr = edata_addr_get(edata);
size_t *p = (size_t *)addr;
/* Check the first page only. */
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
@ -819,8 +823,9 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
if (huge_arena_pac_thp.thp_madvise) {
/* Avoid using HUGEPAGE when the grow size is less than HUGEPAGE. */
if (ind != 0 && ind == huge_arena_ind && ehooks_are_default(ehooks) &&
likely(alloc_size >= HUGEPAGE)) {
if (ind != 0 && ind == huge_arena_ind
&& ehooks_are_default(ehooks)
&& likely(alloc_size >= HUGEPAGE)) {
extent_handle_huge_arena_thp(tsdn, &huge_arena_pac_thp,
pac->edata_cache, ptr, alloc_size);
}
@ -831,8 +836,8 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
extent_gdump_add(tsdn, edata);
}
if (zero && !edata_zeroed_get(edata)) {
ehooks_zero(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata));
ehooks_zero(
tsdn, ehooks, edata_base_get(edata), edata_size_get(edata));
}
return edata;
label_err:
@ -858,8 +863,8 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
extent_gdump_add(tsdn, edata);
}
} else if (opt_retain && expand_edata == NULL && !guarded) {
edata = extent_grow_retained(tsdn, pac, ehooks, size,
alignment, zero, commit);
edata = extent_grow_retained(
tsdn, pac, ehooks, size, alignment, zero, commit);
/* extent_grow_retained() always releases pac->grow_mtx. */
} else {
malloc_mutex_unlock(tsdn, &pac->grow_mtx);
@ -875,12 +880,12 @@ extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
extent_assert_can_coalesce(inner, outer);
eset_remove(&ecache->eset, outer);
bool err = extent_merge_impl(tsdn, pac, ehooks,
forward ? inner : outer, forward ? outer : inner,
bool err = extent_merge_impl(tsdn, pac, ehooks, forward ? inner : outer,
forward ? outer : inner,
/* holding_core_locks */ true);
if (err) {
extent_deactivate_check_state_locked(tsdn, pac, ecache, outer,
extent_state_merging);
extent_deactivate_check_state_locked(
tsdn, pac, ecache, outer, extent_state_merging);
}
return err;
@ -908,10 +913,12 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* Try to coalesce forward. */
edata_t *next = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
edata, EXTENT_PAI_PAC, ecache->state, /* forward */ true);
size_t max_next_neighbor = max_size > edata_size_get(edata) ? max_size - edata_size_get(edata) : 0;
size_t max_next_neighbor = max_size > edata_size_get(edata)
? max_size - edata_size_get(edata)
: 0;
if (next != NULL && edata_size_get(next) <= max_next_neighbor) {
if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
next, true)) {
if (!extent_coalesce(
tsdn, pac, ehooks, ecache, edata, next, true)) {
if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
@ -924,10 +931,12 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* Try to coalesce backward. */
edata_t *prev = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
edata, EXTENT_PAI_PAC, ecache->state, /* forward */ false);
size_t max_prev_neighbor = max_size > edata_size_get(edata) ? max_size - edata_size_get(edata) : 0;
size_t max_prev_neighbor = max_size > edata_size_get(edata)
? max_size - edata_size_get(edata)
: 0;
if (prev != NULL && edata_size_get(prev) <= max_prev_neighbor) {
if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
prev, false)) {
prev, false)) {
edata = prev;
if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
@ -948,36 +957,33 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
static edata_t *
extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata, bool *coalesced) {
return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
SC_LARGE_MAXCLASS, coalesced);
return extent_try_coalesce_impl(
tsdn, pac, ehooks, ecache, edata, SC_LARGE_MAXCLASS, coalesced);
}
static edata_t *
extent_try_coalesce_large(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata, size_t max_size, bool *coalesced) {
return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
max_size, coalesced);
return extent_try_coalesce_impl(
tsdn, pac, ehooks, ecache, edata, max_size, coalesced);
}
/* Purge a single extent to retained / unmapped directly. */
static void
extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
extent_maximally_purge(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) {
size_t extent_size = edata_size_get(edata);
extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
if (config_stats) {
/* Update stats accordingly. */
LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
locked_inc_u64(tsdn,
LOCKEDINT_MTX(*pac->stats_mtx),
locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
&pac->stats->decay_dirty.nmadvise, 1);
locked_inc_u64(tsdn,
LOCKEDINT_MTX(*pac->stats_mtx),
&pac->stats->decay_dirty.purged,
extent_size >> LG_PAGE);
locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
&pac->stats->decay_dirty.purged, extent_size >> LG_PAGE);
LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
atomic_fetch_sub_zu(&pac->stats->pac_mapped, extent_size,
ATOMIC_RELAXED);
atomic_fetch_sub_zu(
&pac->stats->pac_mapped, extent_size, ATOMIC_RELAXED);
}
}
@ -988,9 +994,9 @@ extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
void
extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata) {
assert((ecache->state != extent_state_dirty &&
ecache->state != extent_state_muzzy) ||
!edata_zeroed_get(edata));
assert((ecache->state != extent_state_dirty
&& ecache->state != extent_state_muzzy)
|| !edata_zeroed_get(edata));
malloc_mutex_lock(tsdn, &ecache->mtx);
@ -1001,8 +1007,8 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
}
if (!ecache->delay_coalesce) {
bool coalesced_unused;
edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
&coalesced_unused);
edata = extent_try_coalesce(
tsdn, pac, ehooks, ecache, edata, &coalesced_unused);
} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
assert(ecache == &pac->ecache_dirty);
/* Always coalesce large extents eagerly. */
@ -1027,17 +1033,21 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
* the final coalescing that happens during the transition from dirty ecache
* to muzzy/retained ecache states.
*/
unsigned lg_max_coalesce = (unsigned)opt_lg_extent_max_active_fit;
unsigned lg_max_coalesce = (unsigned)
opt_lg_extent_max_active_fit;
size_t edata_size = edata_size_get(edata);
size_t max_size = (SC_LARGE_MAXCLASS >> lg_max_coalesce) > edata_size ? (edata_size << lg_max_coalesce) : SC_LARGE_MAXCLASS;
bool coalesced;
size_t max_size = (SC_LARGE_MAXCLASS >> lg_max_coalesce)
> edata_size
? (edata_size << lg_max_coalesce)
: SC_LARGE_MAXCLASS;
bool coalesced;
do {
assert(edata_state_get(edata) == extent_state_active);
edata = extent_try_coalesce_large(tsdn, pac, ehooks,
ecache, edata, max_size, &coalesced);
} while (coalesced);
if (edata_size_get(edata) >=
atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED)
if (edata_size_get(edata) >= atomic_load_zu(
&pac->oversize_threshold, ATOMIC_RELAXED)
&& !background_thread_enabled()
&& extent_may_force_decay(pac)) {
/* Shortcut to purge the oversize extent eagerly. */
@ -1053,10 +1063,9 @@ label_skip_coalesce:
}
void
extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) {
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
if (extent_register(tsdn, pac, edata)) {
edata_cache_put(tsdn, pac->edata_cache, edata);
@ -1066,14 +1075,14 @@ extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
extent_dalloc_wrapper_try(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) {
bool err;
assert(edata_base_get(edata) != NULL);
assert(edata_size_get(edata) != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
edata_addr_set(edata, edata_base_get(edata));
@ -1089,8 +1098,8 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
edata_t *
extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, void *new_addr,
size_t size, size_t alignment, bool zero, bool *commit,
bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
@ -1100,14 +1109,14 @@ extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
return NULL;
}
size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
&zero, commit);
void *addr = ehooks_alloc(
tsdn, ehooks, new_addr, size, palignment, &zero, commit);
if (addr == NULL) {
edata_cache_put(tsdn, pac->edata_cache, edata);
return NULL;
}
edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr, size,
/* slab */ false, SC_NSIZES, extent_sn_next(pac),
extent_state_active, zero, *commit, EXTENT_PAI_PAC,
opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD);
/*
@ -1125,8 +1134,8 @@ extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
static void
extent_dalloc_wrapper_finish(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
extent_dalloc_wrapper_finish(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) {
if (config_prof) {
extent_gdump_sub(tsdn, edata);
}
@ -1134,11 +1143,11 @@ extent_dalloc_wrapper_finish(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
void
extent_dalloc_wrapper_purged(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
extent_dalloc_wrapper_purged(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) {
assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
/* Verify that will not go down the dalloc / munmap route. */
assert(ehooks_dalloc_will_fail(ehooks));
@ -1148,19 +1157,19 @@ extent_dalloc_wrapper_purged(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
void
extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
extent_dalloc_wrapper(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) {
assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
/* Avoid calling the default extent_dalloc unless have to. */
if (!ehooks_dalloc_will_fail(ehooks)) {
/* Remove guard pages for dalloc / unmap. */
if (edata_guarded_get(edata)) {
assert(ehooks_are_default(ehooks));
san_unguard_pages_two_sided(tsdn, ehooks, edata,
pac->emap);
san_unguard_pages_two_sided(
tsdn, ehooks, edata, pac->emap);
}
/*
* Deregister first to avoid a race with other allocating
@ -1177,15 +1186,15 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
bool zeroed;
if (!edata_committed_get(edata)) {
zeroed = true;
} else if (!extent_decommit_wrapper(tsdn, ehooks, edata, 0,
edata_size_get(edata))) {
} else if (!extent_decommit_wrapper(
tsdn, ehooks, edata, 0, edata_size_get(edata))) {
zeroed = true;
} else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), 0, edata_size_get(edata))) {
edata_size_get(edata), 0, edata_size_get(edata))) {
zeroed = true;
} else if (edata_state_get(edata) == extent_state_muzzy ||
!ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), 0, edata_size_get(edata))) {
} else if (edata_state_get(edata) == extent_state_muzzy
|| !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), 0, edata_size_get(edata))) {
zeroed = false;
} else {
zeroed = false;
@ -1196,15 +1205,15 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
void
extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
extent_destroy_wrapper(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) {
assert(edata_base_get(edata) != NULL);
assert(edata_size_get(edata) != 0);
extent_state_t state = edata_state_get(edata);
assert(state == extent_state_retained || state == extent_state_active);
assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
if (edata_guarded_get(edata)) {
assert(opt_retain);
@ -1240,8 +1249,8 @@ extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
static bool
extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), offset, length);
edata_committed_set(edata, edata_committed_get(edata) && err);
@ -1261,8 +1270,8 @@ extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length) {
return extent_purge_lazy_impl(tsdn, ehooks, edata, offset,
length, false);
return extent_purge_lazy_impl(
tsdn, ehooks, edata, offset, length, false);
}
static bool
@ -1278,8 +1287,8 @@ extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
bool
extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length) {
return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length,
false);
return extent_purge_forced_impl(
tsdn, ehooks, edata, offset, length, false);
}
/*
@ -1290,16 +1299,16 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
* and returns the trail (except in case of error).
*/
static edata_t *
extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) {
extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata,
size_t size_a, size_t size_b, bool holding_core_locks) {
assert(edata_size_get(edata) == size_a + size_b);
/* Only the shrink path may split w/o holding core locks. */
if (holding_core_locks) {
witness_assert_positive_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
} else {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
}
if (ehooks_split_will_fail(ehooks)) {
@ -1317,8 +1326,8 @@ extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_state_get(edata), edata_zeroed_get(edata),
edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
emap_prepare_t prepare;
bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata,
size_a, trail, size_b);
bool err = emap_split_prepare(
tsdn, pac->emap, &prepare, edata, size_a, trail, size_b);
if (err) {
goto label_error_b;
}
@ -1340,8 +1349,8 @@ extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
edata_size_set(edata, size_a);
emap_split_commit(tsdn, pac->emap, &prepare, edata, size_a, trail,
size_b);
emap_split_commit(
tsdn, pac->emap, &prepare, edata, size_a, trail, size_b);
return trail;
label_error_b:
@ -1353,8 +1362,8 @@ label_error_a:
edata_t *
extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata,
size_t size_a, size_t size_b, bool holding_core_locks) {
return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b,
holding_core_locks);
return extent_split_impl(
tsdn, pac, ehooks, edata, size_a, size_b, holding_core_locks);
}
static bool
@ -1365,8 +1374,8 @@ extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
witness_assert_positive_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
} else {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
}
assert(edata_base_get(a) < edata_base_get(b));
@ -1391,12 +1400,13 @@ extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
emap_prepare_t prepare;
emap_merge_prepare(tsdn, pac->emap, &prepare, a, b);
assert(edata_state_get(a) == extent_state_active ||
edata_state_get(a) == extent_state_merging);
assert(edata_state_get(a) == extent_state_active
|| edata_state_get(a) == extent_state_merging);
edata_state_set(a, extent_state_active);
edata_size_set(a, edata_size_get(a) + edata_size_get(b));
edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ?
edata_sn_get(a) : edata_sn_get(b));
edata_sn_set(a,
(edata_sn_get(a) < edata_sn_get(b)) ? edata_sn_get(a)
: edata_sn_get(b));
edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b));
emap_merge_commit(tsdn, pac->emap, &prepare, a, b);
@ -1407,26 +1417,26 @@ extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
}
bool
extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *a, edata_t *b) {
extent_merge_wrapper(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a, edata_t *b) {
return extent_merge_impl(tsdn, pac, ehooks, a, b,
/* holding_core_locks */ false);
}
bool
extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
bool commit, bool zero, bool growing_retained) {
extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, bool commit,
bool zero, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
if (commit && !edata_committed_get(edata)) {
if (extent_commit_impl(tsdn, ehooks, edata, 0,
edata_size_get(edata), growing_retained)) {
edata_size_get(edata), growing_retained)) {
return true;
}
}
if (zero && !edata_zeroed_get(edata)) {
void *addr = edata_base_get(edata);
void *addr = edata_base_get(edata);
size_t size = edata_size_get(edata);
ehooks_zero(tsdn, ehooks, addr, size);
}

View file

@ -11,14 +11,10 @@
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
#define SBRK_INVALID ((void *)-1)
const char *opt_dss = DSS_DEFAULT;
const char *opt_dss = DSS_DEFAULT;
const char *const dss_prec_names[] = {
"disabled",
"primary",
"secondary",
"N/A"
};
const char *const dss_prec_names[] = {
"disabled", "primary", "secondary", "N/A"};
/*
* Current dss precedence default, used when creating new arenas. NB: This is
@ -26,17 +22,16 @@ const char *const dss_prec_names[] = {
* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
* atomic operations to synchronize the setting.
*/
static atomic_u_t dss_prec_default = ATOMIC_INIT(
(unsigned)DSS_PREC_DEFAULT);
static atomic_u_t dss_prec_default = ATOMIC_INIT((unsigned)DSS_PREC_DEFAULT);
/* Base address of the DSS. */
static void *dss_base;
static void *dss_base;
/* Atomic boolean indicating whether a thread is currently extending DSS. */
static atomic_b_t dss_extending;
static atomic_b_t dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t dss_exhausted;
static atomic_b_t dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static atomic_p_t dss_max;
static atomic_p_t dss_max;
/******************************************************************************/
@ -76,7 +71,7 @@ extent_dss_extending_start(void) {
while (true) {
bool expected = false;
if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
break;
}
spin_adaptive(&spinner);
@ -143,24 +138,24 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
goto label_oom;
}
bool head_state = opt_retain ? EXTENT_IS_HEAD :
EXTENT_NOT_HEAD;
bool head_state = opt_retain ? EXTENT_IS_HEAD
: EXTENT_NOT_HEAD;
/*
* Compute how much page-aligned gap space (if any) is
* necessary to satisfy alignment. This space can be
* recycled for later use.
*/
void *gap_addr_page = ALIGNMENT_ADDR2CEILING(max_cur,
PAGE);
void *gap_addr_page = ALIGNMENT_ADDR2CEILING(
max_cur, PAGE);
void *ret = ALIGNMENT_ADDR2CEILING(
gap_addr_page, alignment);
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
size_t gap_size_page = (uintptr_t)ret
- (uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
edata_init(gap, arena_ind_get(arena),
gap_addr_page, gap_size_page, false,
SC_NSIZES, extent_sn_next(
&arena->pa_shard.pac),
SC_NSIZES,
extent_sn_next(&arena->pa_shard.pac),
extent_state_active, false, true,
EXTENT_PAI_PAC, head_state);
}
@ -169,25 +164,25 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
* allocation space.
*/
void *dss_next = (void *)((byte_t *)ret + size);
if ((uintptr_t)ret < (uintptr_t)max_cur ||
(uintptr_t)dss_next < (uintptr_t)max_cur) {
if ((uintptr_t)ret < (uintptr_t)max_cur
|| (uintptr_t)dss_next < (uintptr_t)max_cur) {
goto label_oom; /* Wrap-around. */
}
/* Compute the increment, including subpage bytes. */
void *gap_addr_subpage = max_cur;
size_t gap_size_subpage = (uintptr_t)ret -
(uintptr_t)gap_addr_subpage;
void *gap_addr_subpage = max_cur;
size_t gap_size_subpage = (uintptr_t)ret
- (uintptr_t)gap_addr_subpage;
intptr_t incr = gap_size_subpage + size;
assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
size);
assert(
(uintptr_t)max_cur + incr == (uintptr_t)ret + size);
/* Try to allocate. */
void *dss_prev = extent_dss_sbrk(incr);
if (dss_prev == max_cur) {
/* Success. */
atomic_store_p(&dss_max, dss_next,
ATOMIC_RELEASE);
atomic_store_p(
&dss_max, dss_next, ATOMIC_RELEASE);
extent_dss_extending_finish();
if (gap_size_page != 0) {
@ -203,17 +198,16 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
*commit = pages_decommit(ret, size);
}
if (*zero && *commit) {
edata_t edata = {0};
edata_t edata = {0};
ehooks_t *ehooks = arena_get_ehooks(
arena);
edata_init(&edata,
arena_ind_get(arena), ret, size,
size, false, SC_NSIZES,
edata_init(&edata, arena_ind_get(arena),
ret, size, size, false, SC_NSIZES,
extent_state_active, false, true,
EXTENT_PAI_PAC, head_state);
if (extent_purge_forced_wrapper(tsdn,
ehooks, &edata, 0, size)) {
ehooks, &edata, 0, size)) {
memset(ret, 0, size);
}
}
@ -225,8 +219,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
*/
if (dss_prev == SBRK_INVALID) {
/* OOM. */
atomic_store_b(&dss_exhausted, true,
ATOMIC_RELEASE);
atomic_store_b(
&dss_exhausted, true, ATOMIC_RELEASE);
goto label_oom;
}
}
@ -239,16 +233,16 @@ label_oom:
static bool
extent_in_dss_helper(void *addr, void *max) {
return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
(uintptr_t)max);
return ((uintptr_t)addr >= (uintptr_t)dss_base
&& (uintptr_t)addr < (uintptr_t)max);
}
bool
extent_in_dss(void *addr) {
cassert(have_dss);
return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
ATOMIC_ACQUIRE));
return extent_in_dss_helper(
addr, atomic_load_p(&dss_max, ATOMIC_ACQUIRE));
}
bool
@ -257,14 +251,14 @@ extent_dss_mergeable(void *addr_a, void *addr_b) {
cassert(have_dss);
if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
(uintptr_t)dss_base) {
if ((uintptr_t)addr_a < (uintptr_t)dss_base
&& (uintptr_t)addr_b < (uintptr_t)dss_base) {
return true;
}
max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
return (extent_in_dss_helper(addr_a, max) ==
extent_in_dss_helper(addr_b, max));
return (extent_in_dss_helper(addr_a, max)
== extent_in_dss_helper(addr_b, max));
}
void
@ -273,7 +267,8 @@ extent_dss_boot(void) {
dss_base = extent_dss_sbrk(0);
atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
atomic_store_b(&dss_exhausted, dss_base == SBRK_INVALID, ATOMIC_RELAXED);
atomic_store_b(
&dss_exhausted, dss_base == SBRK_INVALID, ATOMIC_RELAXED);
atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}

View file

@ -7,7 +7,7 @@
/******************************************************************************/
/* Data. */
bool opt_retain =
bool opt_retain =
#ifdef JEMALLOC_RETAIN
true
#else
@ -18,8 +18,8 @@ bool opt_retain =
/******************************************************************************/
void *
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit) {
extent_alloc_mmap(
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) {
assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
void *ret = pages_map(new_addr, size, alignment, commit);
if (ret == NULL) {

View file

@ -83,8 +83,8 @@ fxp_parse(fxp_t *result, const char *str, char **end) {
}
assert(fractional_part < frac_div);
uint32_t fractional_repr = (uint32_t)(
(fractional_part << 16) / frac_div);
uint32_t fractional_repr = (uint32_t)((fractional_part << 16)
/ frac_div);
/* Success! */
*result = (integer_part << 16) + fractional_repr;
@ -99,7 +99,7 @@ fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]) {
uint32_t integer_part = fxp_round_down(a);
uint32_t fractional_part = (a & ((1U << 16) - 1));
int leading_fraction_zeros = 0;
int leading_fraction_zeros = 0;
uint64_t fraction_digits = fractional_part;
for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
if (fraction_digits < (1U << 16)
@ -113,12 +113,12 @@ fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]) {
fraction_digits /= 10;
}
size_t printed = malloc_snprintf(buf, FXP_BUF_SIZE, "%"FMTu32".",
integer_part);
size_t printed = malloc_snprintf(
buf, FXP_BUF_SIZE, "%" FMTu32 ".", integer_part);
for (int i = 0; i < leading_fraction_zeros; i++) {
buf[printed] = '0';
printed++;
}
malloc_snprintf(&buf[printed], FXP_BUF_SIZE - printed, "%"FMTu64,
fraction_digits);
malloc_snprintf(
&buf[printed], FXP_BUF_SIZE - printed, "%" FMTu64, fraction_digits);
}

View file

@ -9,19 +9,19 @@
typedef struct hooks_internal_s hooks_internal_t;
struct hooks_internal_s {
hooks_t hooks;
bool in_use;
bool in_use;
};
seq_define(hooks_internal_t, hooks)
static atomic_u_t nhooks = ATOMIC_INIT(0);
static seq_hooks_t hooks[HOOK_MAX];
static atomic_u_t nhooks = ATOMIC_INIT(0);
static seq_hooks_t hooks[HOOK_MAX];
static malloc_mutex_t hooks_mu;
bool
hook_boot(void) {
return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
malloc_mutex_rank_exclusive);
return malloc_mutex_init(
&hooks_mu, "hooks", WITNESS_RANK_HOOK, malloc_mutex_rank_exclusive);
}
static void *
@ -84,20 +84,18 @@ hook_remove(tsdn_t *tsdn, void *opaque) {
malloc_mutex_unlock(tsdn, &hooks_mu);
}
#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \
for (int for_each_hook_counter = 0; \
for_each_hook_counter < HOOK_MAX; \
for_each_hook_counter++) { \
bool for_each_hook_success = seq_try_load_hooks( \
(hooks_internal_ptr), &hooks[for_each_hook_counter]); \
if (!for_each_hook_success) { \
continue; \
} \
if (!(hooks_internal_ptr)->in_use) { \
continue; \
}
#define FOR_EACH_HOOK_END \
}
#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \
for (int for_each_hook_counter = 0; for_each_hook_counter < HOOK_MAX; \
for_each_hook_counter++) { \
bool for_each_hook_success = seq_try_load_hooks( \
(hooks_internal_ptr), &hooks[for_each_hook_counter]); \
if (!for_each_hook_success) { \
continue; \
} \
if (!(hooks_internal_ptr)->in_use) { \
continue; \
}
#define FOR_EACH_HOOK_END }
static bool *
hook_reentrantp(void) {
@ -129,26 +127,25 @@ hook_reentrantp(void) {
* untouched.
*/
static bool in_hook_global = true;
tsdn_t *tsdn = tsdn_fetch();
bool *in_hook = tsdn_in_hookp_get(tsdn);
if (in_hook!= NULL) {
tsdn_t *tsdn = tsdn_fetch();
bool *in_hook = tsdn_in_hookp_get(tsdn);
if (in_hook != NULL) {
return in_hook;
}
return &in_hook_global;
}
#define HOOK_PROLOGUE \
if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
return; \
} \
bool *in_hook = hook_reentrantp(); \
if (*in_hook) { \
return; \
} \
#define HOOK_PROLOGUE \
if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
return; \
} \
bool *in_hook = hook_reentrantp(); \
if (*in_hook) { \
return; \
} \
*in_hook = true;
#define HOOK_EPILOGUE \
*in_hook = false;
#define HOOK_EPILOGUE *in_hook = false;
void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
@ -157,10 +154,10 @@ hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_alloc h = hook.hooks.alloc_hook;
if (h != NULL) {
h(hook.hooks.extra, type, result, result_raw, args_raw);
}
hook_alloc h = hook.hooks.alloc_hook;
if (h != NULL) {
h(hook.hooks.extra, type, result, result_raw, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
@ -171,10 +168,10 @@ hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_dalloc h = hook.hooks.dalloc_hook;
if (h != NULL) {
h(hook.hooks.extra, type, address, args_raw);
}
hook_dalloc h = hook.hooks.dalloc_hook;
if (h != NULL) {
h(hook.hooks.extra, type, address, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}
@ -185,11 +182,11 @@ hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_expand h = hook.hooks.expand_hook;
if (h != NULL) {
h(hook.hooks.extra, type, address, old_usize, new_usize,
result_raw, args_raw);
}
hook_expand h = hook.hooks.expand_hook;
if (h != NULL) {
h(hook.hooks.extra, type, address, old_usize, new_usize,
result_raw, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}

227
src/hpa.c
View file

@ -12,17 +12,17 @@
static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
size_t alignment, bool zero, bool guarded, bool frequent_reuse,
bool *deferred_work_generated);
static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
size_t nallocs, edata_list_active_t *results, bool frequent_reuse,
bool *deferred_work_generated);
static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated);
static void hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated);
static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self,
edata_list_active_t *list, bool *deferred_work_generated);
static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
size_t nallocs, edata_list_active_t *results, bool frequent_reuse,
bool *deferred_work_generated);
static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated);
static void hpa_dalloc(
tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated);
static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self,
edata_list_active_t *list, bool *deferred_work_generated);
static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
bool
@ -70,7 +70,8 @@ hpa_do_consistency_checks(hpa_shard_t *shard) {
}
bool
hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) {
hpa_central_init(
hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) {
/* malloc_conf processing should have filtered out these cases. */
assert(hpa_supported());
bool err;
@ -89,8 +90,8 @@ hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks)
static hpdata_t *
hpa_alloc_ps(tsdn_t *tsdn, hpa_central_t *central) {
return (hpdata_t *)base_alloc(tsdn, central->base, sizeof(hpdata_t),
CACHELINE);
return (hpdata_t *)base_alloc(
tsdn, central->base, sizeof(hpdata_t), CACHELINE);
}
static hpdata_t *
@ -137,8 +138,8 @@ hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size,
*/
bool commit = true;
/* Allocate address space, bailing if we fail. */
void *new_eden = pages_map(NULL, HPA_EDEN_SIZE, HUGEPAGE,
&commit);
void *new_eden = pages_map(
NULL, HPA_EDEN_SIZE, HUGEPAGE, &commit);
if (new_eden == NULL) {
*oom = true;
malloc_mutex_unlock(tsdn, &central->grow_mtx);
@ -243,8 +244,8 @@ hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
* locking here.
*/
static void
hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst,
hpa_shard_nonderived_stats_t *src) {
hpa_shard_nonderived_stats_accum(
hpa_shard_nonderived_stats_t *dst, hpa_shard_nonderived_stats_t *src) {
dst->npurge_passes += src->npurge_passes;
dst->npurges += src->npurges;
dst->nhugifies += src->nhugifies;
@ -255,13 +256,13 @@ hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst,
void
hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src) {
psset_stats_accum(&dst->psset_stats, &src->psset_stats);
hpa_shard_nonderived_stats_accum(&dst->nonderived_stats,
&src->nonderived_stats);
hpa_shard_nonderived_stats_accum(
&dst->nonderived_stats, &src->nonderived_stats);
}
void
hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
hpa_shard_stats_t *dst) {
hpa_shard_stats_merge(
tsdn_t *tsdn, hpa_shard_t *shard, hpa_shard_stats_t *dst) {
hpa_do_consistency_checks(shard);
malloc_mutex_lock(tsdn, &shard->grow_mtx);
@ -295,8 +296,8 @@ hpa_ndirty_max(tsdn_t *tsdn, hpa_shard_t *shard) {
if (shard->opts.dirty_mult == (fxp_t)-1) {
return (size_t)-1;
}
return fxp_mul_frac(psset_nactive(&shard->psset),
shard->opts.dirty_mult);
return fxp_mul_frac(
psset_nactive(&shard->psset), shard->opts.dirty_mult);
}
static bool
@ -307,7 +308,8 @@ hpa_hugify_blocked_by_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) {
return false;
}
return hpa_adjusted_ndirty(tsdn, shard)
+ hpdata_nretained_get(to_hugify) > hpa_ndirty_max(tsdn, shard);
+ hpdata_nretained_get(to_hugify)
> hpa_ndirty_max(tsdn, shard);
}
static bool
@ -323,8 +325,8 @@ hpa_should_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
}
static void
hpa_update_purge_hugify_eligibility(tsdn_t *tsdn, hpa_shard_t *shard,
hpdata_t *ps) {
hpa_update_purge_hugify_eligibility(
tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
if (hpdata_changing_state_get(ps)) {
hpdata_purge_allowed_set(ps, false);
@ -397,7 +399,7 @@ hpa_shard_has_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
#define HPA_PURGE_BATCH_MAX_DEFAULT 16
#ifndef JEMALLOC_JET
#define HPA_PURGE_BATCH_MAX HPA_PURGE_BATCH_MAX_DEFAULT
# define HPA_PURGE_BATCH_MAX HPA_PURGE_BATCH_MAX_DEFAULT
#else
size_t hpa_purge_max_batch_size_for_test = HPA_PURGE_BATCH_MAX_DEFAULT;
size_t
@ -406,20 +408,21 @@ hpa_purge_max_batch_size_for_test_set(size_t new_size) {
hpa_purge_max_batch_size_for_test = new_size;
return old_size;
}
#define HPA_PURGE_BATCH_MAX hpa_purge_max_batch_size_for_test
# define HPA_PURGE_BATCH_MAX hpa_purge_max_batch_size_for_test
#endif
static inline size_t
hpa_process_madvise_max_iovec_len(void) {
assert(opt_process_madvise_max_batch <=
PROCESS_MADVISE_MAX_BATCH_LIMIT);
return opt_process_madvise_max_batch == 0 ?
HPA_MIN_VAR_VEC_SIZE : opt_process_madvise_max_batch;
assert(
opt_process_madvise_max_batch <= PROCESS_MADVISE_MAX_BATCH_LIMIT);
return opt_process_madvise_max_batch == 0
? HPA_MIN_VAR_VEC_SIZE
: opt_process_madvise_max_batch;
}
static inline void
hpa_purge_actual_unlocked(hpa_shard_t *shard, hpa_purge_item_t *batch,
size_t batch_sz) {
hpa_purge_actual_unlocked(
hpa_shard_t *shard, hpa_purge_item_t *batch, size_t batch_sz) {
assert(batch_sz > 0);
size_t len = hpa_process_madvise_max_iovec_len();
@ -433,17 +436,18 @@ hpa_purge_actual_unlocked(hpa_shard_t *shard, hpa_purge_item_t *batch,
/* Actually do the purging, now that the lock is dropped. */
if (batch[i].dehugify) {
shard->central->hooks.dehugify(hpdata_addr_get(to_purge),
HUGEPAGE);
shard->central->hooks.dehugify(
hpdata_addr_get(to_purge), HUGEPAGE);
}
void *purge_addr;
void *purge_addr;
size_t purge_size;
size_t total_purged_on_one_hp = 0;
while (hpdata_purge_next(
to_purge, &batch[i].state, &purge_addr, &purge_size)) {
to_purge, &batch[i].state, &purge_addr, &purge_size)) {
total_purged_on_one_hp += purge_size;
assert(total_purged_on_one_hp <= HUGEPAGE);
hpa_range_accum_add(&accum, purge_addr, purge_size, shard);
hpa_range_accum_add(
&accum, purge_addr, purge_size, shard);
}
}
hpa_range_accum_finish(&accum, shard);
@ -490,10 +494,10 @@ hpa_purge_start_hp(hpa_purge_batch_t *b, psset_t *psset) {
/* Gather all the metadata we'll need during the purge. */
hp_item->dehugify = hpdata_huge_get(hp_item->hp);
size_t nranges;
size_t ndirty =
hpdata_purge_begin(hp_item->hp, &hp_item->state, &nranges);
size_t ndirty = hpdata_purge_begin(
hp_item->hp, &hp_item->state, &nranges);
/* We picked hp to purge, so it should have some dirty ranges */
assert(ndirty > 0 && nranges >0);
assert(ndirty > 0 && nranges > 0);
b->ndirty_in_batch += ndirty;
b->nranges += nranges;
return ndirty;
@ -501,8 +505,8 @@ hpa_purge_start_hp(hpa_purge_batch_t *b, psset_t *psset) {
/* Finish purge of one huge page. */
static inline void
hpa_purge_finish_hp(tsdn_t *tsdn, hpa_shard_t *shard,
hpa_purge_item_t *hp_item) {
hpa_purge_finish_hp(
tsdn_t *tsdn, hpa_shard_t *shard, hpa_purge_item_t *hp_item) {
if (hp_item->dehugify) {
shard->stats.ndehugifies++;
}
@ -523,9 +527,9 @@ hpa_purge_finish_hp(tsdn_t *tsdn, hpa_shard_t *shard,
static inline bool
hpa_batch_full(hpa_purge_batch_t *b) {
/* It's okay for ranges to go above */
return b->npurged_hp_total == b->max_hp ||
b->item_cnt == b->items_capacity ||
b->nranges >= b->range_watermark;
return b->npurged_hp_total == b->max_hp
|| b->item_cnt == b->items_capacity
|| b->nranges >= b->range_watermark;
}
static inline void
@ -547,23 +551,25 @@ hpa_purge(tsdn_t *tsdn, hpa_shard_t *shard, size_t max_hp) {
assert(max_hp > 0);
assert(HPA_PURGE_BATCH_MAX > 0);
assert(HPA_PURGE_BATCH_MAX <
(VARIABLE_ARRAY_SIZE_MAX / sizeof(hpa_purge_item_t)));
assert(HPA_PURGE_BATCH_MAX
< (VARIABLE_ARRAY_SIZE_MAX / sizeof(hpa_purge_item_t)));
VARIABLE_ARRAY(hpa_purge_item_t, items, HPA_PURGE_BATCH_MAX);
hpa_purge_batch_t batch = {
.max_hp = max_hp,
.npurged_hp_total = 0,
.items = &items[0],
.items_capacity = HPA_PURGE_BATCH_MAX,
.range_watermark = hpa_process_madvise_max_iovec_len(),
.max_hp = max_hp,
.npurged_hp_total = 0,
.items = &items[0],
.items_capacity = HPA_PURGE_BATCH_MAX,
.range_watermark = hpa_process_madvise_max_iovec_len(),
};
assert(batch.range_watermark > 0);
while (1) {
hpa_batch_pass_start(&batch);
assert(hpa_batch_empty(&batch));
while(!hpa_batch_full(&batch) && hpa_should_purge(tsdn, shard)) {
size_t ndirty = hpa_purge_start_hp(&batch, &shard->psset);
while (
!hpa_batch_full(&batch) && hpa_should_purge(tsdn, shard)) {
size_t ndirty = hpa_purge_start_hp(
&batch, &shard->psset);
if (ndirty == 0) {
break;
}
@ -582,8 +588,8 @@ hpa_purge(tsdn_t *tsdn, hpa_shard_t *shard, size_t max_hp) {
shard->npending_purge -= batch.ndirty_in_batch;
shard->stats.npurges += batch.ndirty_in_batch;
shard->central->hooks.curtime(&shard->last_purge,
/* first_reading */ false);
for (size_t i=0; i<batch.item_cnt; ++i) {
/* first_reading */ false);
for (size_t i = 0; i < batch.item_cnt; ++i) {
hpa_purge_finish_hp(tsdn, shard, &batch.items[i]);
}
}
@ -629,8 +635,8 @@ hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_unlock(tsdn, &shard->mtx);
bool err = shard->central->hooks.hugify(hpdata_addr_get(to_hugify),
HUGEPAGE, shard->opts.hugify_sync);
bool err = shard->central->hooks.hugify(
hpdata_addr_get(to_hugify), HUGEPAGE, shard->opts.hugify_sync);
malloc_mutex_lock(tsdn, &shard->mtx);
shard->stats.nhugifies++;
@ -669,8 +675,8 @@ hpa_min_purge_interval_passed(tsdn_t *tsdn, hpa_shard_t *shard) {
* hpa_shard_do_deferred_work() call.
*/
static void
hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard,
bool forced) {
hpa_shard_maybe_do_deferred_work(
tsdn_t *tsdn, hpa_shard_t *shard, bool forced) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
if (!forced && shard->opts.deferral_allowed) {
return;
@ -704,8 +710,7 @@ hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard,
* of purging algorithm.
*/
ssize_t max_purge_nhp = shard->opts.experimental_max_purge_nhp;
if (max_purge_nhp != -1 &&
max_purges > (size_t)max_purge_nhp) {
if (max_purge_nhp != -1 && max_purges > (size_t)max_purge_nhp) {
max_purges = max_purge_nhp;
}
@ -725,9 +730,9 @@ hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard,
}
static edata_t *
hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
bool *oom) {
bool err;
hpa_try_alloc_one_no_grow(
tsdn_t *tsdn, hpa_shard_t *shard, size_t size, bool *oom) {
bool err;
edata_t *edata = edata_cache_fast_get(tsdn, &shard->ecf);
if (edata == NULL) {
*oom = true;
@ -754,8 +759,8 @@ hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
}
void *addr = hpdata_reserve_alloc(ps, size);
edata_init(edata, shard->ind, addr, size, /* slab */ false,
SC_NSIZES, /* sn */ hpdata_age_get(ps), extent_state_active,
edata_init(edata, shard->ind, addr, size, /* slab */ false, SC_NSIZES,
/* sn */ hpdata_age_get(ps), extent_state_active,
/* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
EXTENT_NOT_HEAD);
edata_ps_set(edata, ps);
@ -768,11 +773,11 @@ hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
* dropped. This would force us to deal with a pageslab eviction down
* the error pathway, which is a pain.
*/
err = emap_register_boundary(tsdn, shard->emap, edata,
SC_NSIZES, /* slab */ false);
err = emap_register_boundary(
tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false);
if (err) {
hpdata_unreserve(ps, edata_addr_get(edata),
edata_size_get(edata));
hpdata_unreserve(
ps, edata_addr_get(edata), edata_size_get(edata));
/*
* We should arguably reset dirty state here, but this would
* require some sort of prepare + commit functionality that's a
@ -800,8 +805,8 @@ hpa_try_alloc_batch_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
malloc_mutex_lock(tsdn, &shard->mtx);
size_t nsuccess = 0;
for (; nsuccess < nallocs; nsuccess++) {
edata_t *edata = hpa_try_alloc_one_no_grow(tsdn, shard, size,
oom);
edata_t *edata = hpa_try_alloc_one_no_grow(
tsdn, shard, size, oom);
if (edata == NULL) {
break;
}
@ -819,12 +824,11 @@ hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
size_t nallocs, edata_list_active_t *results,
bool *deferred_work_generated) {
assert(size <= HUGEPAGE);
assert(size <= shard->opts.slab_max_alloc ||
size == sz_s2u(size));
assert(size <= shard->opts.slab_max_alloc || size == sz_s2u(size));
bool oom = false;
size_t nsuccess = hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
nallocs, results, deferred_work_generated);
size_t nsuccess = hpa_try_alloc_batch_no_grow(
tsdn, shard, size, &oom, nallocs, results, deferred_work_generated);
if (nsuccess == nallocs || oom) {
return nsuccess;
@ -851,8 +855,8 @@ hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
* deallocations (and allocations of smaller sizes) may still succeed
* while we're doing this potentially expensive system call.
*/
hpdata_t *ps = hpa_central_extract(tsdn, shard->central, size,
shard->age_counter++, &oom);
hpdata_t *ps = hpa_central_extract(
tsdn, shard->central, size, shard->age_counter++, &oom);
if (ps == NULL) {
malloc_mutex_unlock(tsdn, &shard->grow_mtx);
return nsuccess;
@ -894,8 +898,8 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
bool *deferred_work_generated) {
assert(nallocs > 0);
assert((size & PAGE_MASK) == 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
hpa_shard_t *shard = hpa_from_pai(self);
/*
@ -908,16 +912,16 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
* huge page size). These requests do not concern internal
* fragmentation with huge pages (again, the full size will be used).
*/
if (!(frequent_reuse && size <= HUGEPAGE) &&
(size > shard->opts.slab_max_alloc)) {
if (!(frequent_reuse && size <= HUGEPAGE)
&& (size > shard->opts.slab_max_alloc)) {
return 0;
}
size_t nsuccess = hpa_alloc_batch_psset(tsdn, shard, size, nallocs,
results, deferred_work_generated);
size_t nsuccess = hpa_alloc_batch_psset(
tsdn, shard, size, nallocs, results, deferred_work_generated);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
/*
* Guard the sanity checks with config_debug because the loop cannot be
@ -926,13 +930,13 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
*/
if (config_debug) {
edata_t *edata;
ql_foreach(edata, &results->head, ql_link_active) {
ql_foreach (edata, &results->head, ql_link_active) {
emap_assert_mapped(tsdn, shard->emap, edata);
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
assert(edata_state_get(edata) == extent_state_active);
assert(edata_arena_ind_get(edata) == shard->ind);
assert(edata_szind_get_maybe_invalid(edata) ==
SC_NSIZES);
assert(
edata_szind_get_maybe_invalid(edata) == SC_NSIZES);
assert(!edata_slab_get(edata));
assert(edata_committed_get(edata));
assert(edata_base_get(edata) == edata_addr_get(edata));
@ -947,8 +951,8 @@ hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
assert((size & PAGE_MASK) == 0);
assert(!guarded);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
/* We don't handle alignment or zeroing for now. */
if (alignment > PAGE || zero) {
@ -975,8 +979,8 @@ hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
}
static bool
hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated) {
hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
size_t new_size, bool *deferred_work_generated) {
/* Shrink not yet supported. */
return true;
}
@ -1021,7 +1025,7 @@ hpa_dalloc_locked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
hpdata_t *ps = edata_ps_get(edata);
/* Currently, all edatas come from pageslabs. */
assert(ps != NULL);
void *unreserve_addr = edata_addr_get(edata);
void *unreserve_addr = edata_addr_get(edata);
size_t unreserve_size = edata_size_get(edata);
edata_cache_fast_put(tsdn, &shard->ecf, edata);
@ -1037,7 +1041,7 @@ hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
hpa_shard_t *shard = hpa_from_pai(self);
edata_t *edata;
ql_foreach(edata, &list->head, ql_link_active) {
ql_foreach (edata, &list->head, ql_link_active) {
hpa_dalloc_prepare_unlocked(tsdn, shard, edata);
}
@ -1048,15 +1052,14 @@ hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
hpa_dalloc_locked(tsdn, shard, edata);
}
hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
*deferred_work_generated =
hpa_shard_has_deferred_work(tsdn, shard);
*deferred_work_generated = hpa_shard_has_deferred_work(tsdn, shard);
malloc_mutex_unlock(tsdn, &shard->mtx);
}
static void
hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated) {
hpa_dalloc(
tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) {
assert(!edata_guarded_get(edata));
/* Just a dalloc_batch of size 1; this lets us share logic. */
edata_list_active_t dalloc_list;
@ -1072,14 +1075,14 @@ hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
static uint64_t
hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
hpa_shard_t *shard = hpa_from_pai(self);
uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX;
uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX;
malloc_mutex_lock(tsdn, &shard->mtx);
hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
if (to_hugify != NULL) {
nstime_t time_hugify_allowed =
hpdata_time_hugify_allowed(to_hugify);
nstime_t time_hugify_allowed = hpdata_time_hugify_allowed(
to_hugify);
uint64_t since_hugify_allowed_ms =
shard->central->hooks.ms_since(&time_hugify_allowed);
/*
@ -1087,8 +1090,8 @@ hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
* sleep for the rest.
*/
if (since_hugify_allowed_ms < shard->opts.hugify_delay_ms) {
time_ns = shard->opts.hugify_delay_ms -
since_hugify_allowed_ms;
time_ns = shard->opts.hugify_delay_ms
- since_hugify_allowed_ms;
time_ns *= 1000 * 1000;
} else {
malloc_mutex_unlock(tsdn, &shard->mtx);
@ -1110,8 +1113,8 @@ hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
if (since_last_purge_ms < shard->opts.min_purge_interval_ms) {
uint64_t until_purge_ns;
until_purge_ns = shard->opts.min_purge_interval_ms -
since_last_purge_ms;
until_purge_ns = shard->opts.min_purge_interval_ms
- since_last_purge_ms;
until_purge_ns *= 1000 * 1000;
if (until_purge_ns < time_ns) {
@ -1176,8 +1179,8 @@ hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
}
void
hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
bool deferral_allowed) {
hpa_shard_set_deferral_allowed(
tsdn_t *tsdn, hpa_shard_t *shard, bool deferral_allowed) {
hpa_do_consistency_checks(shard);
malloc_mutex_lock(tsdn, &shard->mtx);

View file

@ -3,26 +3,18 @@
#include "jemalloc/internal/hpa_hooks.h"
static void *hpa_hooks_map(size_t size);
static void hpa_hooks_unmap(void *ptr, size_t size);
static void hpa_hooks_purge(void *ptr, size_t size);
static bool hpa_hooks_hugify(void *ptr, size_t size, bool sync);
static void hpa_hooks_dehugify(void *ptr, size_t size);
static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading);
static void *hpa_hooks_map(size_t size);
static void hpa_hooks_unmap(void *ptr, size_t size);
static void hpa_hooks_purge(void *ptr, size_t size);
static bool hpa_hooks_hugify(void *ptr, size_t size, bool sync);
static void hpa_hooks_dehugify(void *ptr, size_t size);
static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading);
static uint64_t hpa_hooks_ms_since(nstime_t *past_nstime);
static bool hpa_hooks_vectorized_purge(
void *vec, size_t vlen, size_t nbytes);
static bool hpa_hooks_vectorized_purge(void *vec, size_t vlen, size_t nbytes);
const hpa_hooks_t hpa_hooks_default = {
&hpa_hooks_map,
&hpa_hooks_unmap,
&hpa_hooks_purge,
&hpa_hooks_hugify,
&hpa_hooks_dehugify,
&hpa_hooks_curtime,
&hpa_hooks_ms_since,
&hpa_hooks_vectorized_purge
};
const hpa_hooks_t hpa_hooks_default = {&hpa_hooks_map, &hpa_hooks_unmap,
&hpa_hooks_purge, &hpa_hooks_hugify, &hpa_hooks_dehugify,
&hpa_hooks_curtime, &hpa_hooks_ms_since, &hpa_hooks_vectorized_purge};
static void *
hpa_hooks_map(size_t size) {
@ -82,13 +74,12 @@ hpa_hooks_ms_since(nstime_t *past_nstime) {
return nstime_ms_since(past_nstime);
}
/* Return true if we did not purge all nbytes, or on some error */
static bool
hpa_hooks_vectorized_purge(void *vec, size_t vlen, size_t nbytes) {
#ifdef JEMALLOC_HAVE_PROCESS_MADVISE
return pages_purge_process_madvise(vec, vlen, nbytes);
return pages_purge_process_madvise(vec, vlen, nbytes);
#else
return true;
return true;
#endif
}

View file

@ -17,8 +17,7 @@ hpdata_age_comp(const hpdata_t *a, const hpdata_t *b) {
ph_gen(, hpdata_age_heap, hpdata_t, age_link, hpdata_age_comp)
void
hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) {
void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) {
hpdata_addr_set(hpdata, addr);
hpdata_age_set(hpdata, age);
hpdata->h_huge = false;
@ -66,8 +65,8 @@ hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) {
size_t largest_unchosen_range = 0;
while (true) {
bool found = fb_urange_iter(hpdata->active_pages,
HUGEPAGE_PAGES, start, &begin, &len);
bool found = fb_urange_iter(
hpdata->active_pages, HUGEPAGE_PAGES, start, &begin, &len);
/*
* A precondition to this function is that hpdata must be able
* to serve the allocation.
@ -97,8 +96,8 @@ hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) {
* We might be about to dirty some memory for the first time; update our
* count if so.
*/
size_t new_dirty = fb_ucount(hpdata->touched_pages, HUGEPAGE_PAGES,
result, npages);
size_t new_dirty = fb_ucount(
hpdata->touched_pages, HUGEPAGE_PAGES, result, npages);
fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, result, npages);
hpdata->h_ntouched += new_dirty;
@ -129,8 +128,8 @@ hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) {
}
hpdata_assert_consistent(hpdata);
return (void *)(
(byte_t *)hpdata_addr_get(hpdata) + (result << LG_PAGE));
return (
void *)((byte_t *)hpdata_addr_get(hpdata) + (result << LG_PAGE));
}
void
@ -148,10 +147,10 @@ hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) {
fb_unset_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
/* We might have just created a new, larger range. */
size_t new_begin = (fb_fls(hpdata->active_pages, HUGEPAGE_PAGES,
begin) + 1);
size_t new_end = fb_ffs(hpdata->active_pages, HUGEPAGE_PAGES,
begin + npages - 1);
size_t new_begin = (fb_fls(hpdata->active_pages, HUGEPAGE_PAGES, begin)
+ 1);
size_t new_end = fb_ffs(
hpdata->active_pages, HUGEPAGE_PAGES, begin + npages - 1);
size_t new_range_len = new_end - new_begin;
if (new_range_len > old_longest_range) {
@ -164,8 +163,8 @@ hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) {
}
size_t
hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
size_t *nranges) {
hpdata_purge_begin(
hpdata_t *hpdata, hpdata_purge_state_t *purge_state, size_t *nranges) {
hpdata_assert_consistent(hpdata);
/*
* See the comment below; we might purge any inactive extent, so it's
@ -212,29 +211,29 @@ hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
fb_group_t dirty_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
fb_init(dirty_pages, HUGEPAGE_PAGES);
fb_bit_not(dirty_pages, hpdata->active_pages, HUGEPAGE_PAGES);
fb_bit_and(dirty_pages, dirty_pages, hpdata->touched_pages,
HUGEPAGE_PAGES);
fb_bit_and(
dirty_pages, dirty_pages, hpdata->touched_pages, HUGEPAGE_PAGES);
fb_init(purge_state->to_purge, HUGEPAGE_PAGES);
size_t next_bit = 0;
*nranges = 0;
while (next_bit < HUGEPAGE_PAGES) {
size_t next_dirty = fb_ffs(dirty_pages, HUGEPAGE_PAGES,
next_bit);
size_t next_dirty = fb_ffs(
dirty_pages, HUGEPAGE_PAGES, next_bit);
/* Recall that fb_ffs returns nbits if no set bit is found. */
if (next_dirty == HUGEPAGE_PAGES) {
break;
}
size_t next_active = fb_ffs(hpdata->active_pages,
HUGEPAGE_PAGES, next_dirty);
size_t next_active = fb_ffs(
hpdata->active_pages, HUGEPAGE_PAGES, next_dirty);
/*
* Don't purge past the end of the dirty extent, into retained
* pages. This helps the kernel a tiny bit, but honestly it's
* mostly helpful for testing (where we tend to write test cases
* that think in terms of the dirty ranges).
*/
ssize_t last_dirty = fb_fls(dirty_pages, HUGEPAGE_PAGES,
next_active - 1);
ssize_t last_dirty = fb_fls(
dirty_pages, HUGEPAGE_PAGES, next_active - 1);
assert(last_dirty >= 0);
assert((size_t)last_dirty >= next_dirty);
assert((size_t)last_dirty - next_dirty + 1 <= HUGEPAGE_PAGES);
@ -249,9 +248,9 @@ hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
size_t ndirty = hpdata->h_ntouched - hpdata->h_nactive;
purge_state->ndirty_to_purge = ndirty;
assert(ndirty <= fb_scount(
purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
assert(ndirty == fb_scount(dirty_pages, HUGEPAGE_PAGES, 0,
HUGEPAGE_PAGES));
purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
assert(ndirty
== fb_scount(dirty_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
assert(*nranges <= ndirty);
assert(ndirty == 0 || *nranges > 0);
@ -281,8 +280,8 @@ hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
return false;
}
*r_purge_addr = (void *)(
(byte_t *)hpdata_addr_get(hpdata) + purge_begin * PAGE);
*r_purge_addr = (void *)((byte_t *)hpdata_addr_get(hpdata)
+ purge_begin * PAGE);
*r_purge_size = purge_len * PAGE;
purge_state->next_purge_search_begin = purge_begin + purge_len;
@ -299,12 +298,13 @@ hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
/* See the comment in reserve. */
assert(!hpdata->h_in_psset || hpdata->h_updating);
assert(purge_state->npurged == fb_scount(purge_state->to_purge,
HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
assert(purge_state->npurged
== fb_scount(
purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
assert(purge_state->npurged >= purge_state->ndirty_to_purge);
fb_bit_not(purge_state->to_purge, purge_state->to_purge,
HUGEPAGE_PAGES);
fb_bit_not(
purge_state->to_purge, purge_state->to_purge, HUGEPAGE_PAGES);
fb_bit_and(hpdata->touched_pages, hpdata->touched_pages,
purge_state->to_purge, HUGEPAGE_PAGES);
assert(hpdata->h_ntouched >= purge_state->ndirty_to_purge);

View file

@ -3,8 +3,8 @@
#include "jemalloc/internal/inspect.h"
void
inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
size_t *nregs, size_t *size) {
inspect_extent_util_stats_get(
tsdn_t *tsdn, const void *ptr, size_t *nfree, size_t *nregs, size_t *size) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
@ -57,7 +57,7 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
&arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
assert(arena != NULL);
const unsigned binshard = edata_binshard_get(edata);
bin_t *bin = arena_get_bin(arena, szind, binshard);
bin_t *bin = arena_get_bin(arena, szind, binshard);
malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats) {

File diff suppressed because it is too large Load diff

View file

@ -24,45 +24,52 @@ extern "C" {
//
// ... but it needs to work with jemalloc namespaces.
void *operator new(std::size_t size);
void *operator new[](std::size_t size);
void *operator new(std::size_t size, const std::nothrow_t &) noexcept;
void *operator new[](std::size_t size, const std::nothrow_t &) noexcept;
void operator delete(void *ptr) noexcept;
void operator delete[](void *ptr) noexcept;
void operator delete(void *ptr, const std::nothrow_t &) noexcept;
void operator delete[](void *ptr, const std::nothrow_t &) noexcept;
void *operator new(std::size_t size);
void *operator new[](std::size_t size);
void *operator new(std::size_t size, const std::nothrow_t &) noexcept;
void *operator new[](std::size_t size, const std::nothrow_t &) noexcept;
void operator delete(void *ptr) noexcept;
void operator delete[](void *ptr) noexcept;
void operator delete(void *ptr, const std::nothrow_t &) noexcept;
void operator delete[](void *ptr, const std::nothrow_t &) noexcept;
#if __cpp_sized_deallocation >= 201309
/* C++14's sized-delete operators. */
void operator delete(void *ptr, std::size_t size) noexcept;
void operator delete[](void *ptr, std::size_t size) noexcept;
void operator delete(void *ptr, std::size_t size) noexcept;
void operator delete[](void *ptr, std::size_t size) noexcept;
#endif
#if __cpp_aligned_new >= 201606
/* C++17's over-aligned operators. */
void *operator new(std::size_t size, std::align_val_t);
void *operator new(std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
void *operator new[](std::size_t size, std::align_val_t);
void *operator new[](std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
void operator delete(void* ptr, std::align_val_t) noexcept;
void operator delete(void* ptr, std::align_val_t, const std::nothrow_t &) noexcept;
void operator delete(void* ptr, std::size_t size, std::align_val_t al) noexcept;
void operator delete[](void* ptr, std::align_val_t) noexcept;
void operator delete[](void* ptr, std::align_val_t, const std::nothrow_t &) noexcept;
void operator delete[](void* ptr, std::size_t size, std::align_val_t al) noexcept;
void *operator new(std::size_t size, std::align_val_t);
void *operator new(
std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
void *operator new[](std::size_t size, std::align_val_t);
void *operator new[](
std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
void operator delete(void *ptr, std::align_val_t) noexcept;
void operator delete(
void *ptr, std::align_val_t, const std::nothrow_t &) noexcept;
void operator delete(void *ptr, std::size_t size, std::align_val_t al) noexcept;
void operator delete[](void *ptr, std::align_val_t) noexcept;
void operator delete[](
void *ptr, std::align_val_t, const std::nothrow_t &) noexcept;
void operator delete[](
void *ptr, std::size_t size, std::align_val_t al) noexcept;
#endif
JEMALLOC_NOINLINE
static void *
handleOOM(std::size_t size, bool nothrow) {
if (opt_experimental_infallible_new) {
const char *huge_warning = (size >= ((std::size_t)1 << 30)) ?
"This may be caused by heap corruption, if the large size "
"is unexpected (suggest building with sanitizers for "
"debugging)." : "";
const char *huge_warning = (size >= ((std::size_t)1 << 30))
? "This may be caused by heap corruption, if the large size "
"is unexpected (suggest building with sanitizers for "
"debugging)."
: "";
safety_check_fail("<jemalloc>: Allocation of size %zu failed. "
safety_check_fail(
"<jemalloc>: Allocation of size %zu failed. "
"%s opt.experimental_infallible_new is true. Aborting.\n",
size, huge_warning);
return nullptr;
@ -74,7 +81,7 @@ handleOOM(std::size_t size, bool nothrow) {
std::new_handler handler;
// GCC-4.8 and clang 4.0 do not have std::get_new_handler.
{
static std::mutex mtx;
static std::mutex mtx;
std::lock_guard<std::mutex> lock(mtx);
handler = std::set_new_handler(nullptr);
@ -98,8 +105,7 @@ handleOOM(std::size_t size, bool nothrow) {
}
template <bool IsNoExcept>
JEMALLOC_NOINLINE
static void *
JEMALLOC_NOINLINE static void *
fallbackNewImpl(std::size_t size) noexcept(IsNoExcept) {
void *ptr = malloc_default(size);
if (likely(ptr != nullptr)) {
@ -109,12 +115,11 @@ fallbackNewImpl(std::size_t size) noexcept(IsNoExcept) {
}
template <bool IsNoExcept>
JEMALLOC_ALWAYS_INLINE
void *
JEMALLOC_ALWAYS_INLINE void *
newImpl(std::size_t size) noexcept(IsNoExcept) {
LOG("core.operator_new.entry", "size: %zu", size);
void * ret = imalloc_fastpath(size, &fallbackNewImpl<IsNoExcept>);
void *ret = imalloc_fastpath(size, &fallbackNewImpl<IsNoExcept>);
LOG("core.operator_new.exit", "result: %p", ret);
return ret;
@ -143,9 +148,9 @@ operator new[](std::size_t size, const std::nothrow_t &) noexcept {
#if __cpp_aligned_new >= 201606
template <bool IsNoExcept>
JEMALLOC_ALWAYS_INLINE
void *
alignedNewImpl(std::size_t size, std::align_val_t alignment) noexcept(IsNoExcept) {
JEMALLOC_ALWAYS_INLINE void *
alignedNewImpl(std::size_t size, std::align_val_t alignment) noexcept(
IsNoExcept) {
void *ptr = je_aligned_alloc(static_cast<std::size_t>(alignment), size);
if (likely(ptr != nullptr)) {
return ptr;
@ -165,16 +170,18 @@ operator new[](std::size_t size, std::align_val_t alignment) {
}
void *
operator new(std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept {
operator new(std::size_t size, std::align_val_t alignment,
const std::nothrow_t &) noexcept {
return alignedNewImpl<true>(size, alignment);
}
void *
operator new[](std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept {
operator new[](std::size_t size, std::align_val_t alignment,
const std::nothrow_t &) noexcept {
return alignedNewImpl<true>(size, alignment);
}
#endif // __cpp_aligned_new
#endif // __cpp_aligned_new
void
operator delete(void *ptr) noexcept {
@ -203,7 +210,8 @@ operator delete(void *ptr, const std::nothrow_t &) noexcept {
LOG("core.operator_delete.exit", "");
}
void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
void
operator delete[](void *ptr, const std::nothrow_t &) noexcept {
LOG("core.operator_delete.entry", "ptr: %p", ptr);
je_free_impl(ptr);
@ -215,7 +223,7 @@ void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
JEMALLOC_ALWAYS_INLINE
void
sizedDeleteImpl(void* ptr, std::size_t size) noexcept {
sizedDeleteImpl(void *ptr, std::size_t size) noexcept {
if (unlikely(ptr == nullptr)) {
return;
}
@ -236,14 +244,14 @@ operator delete[](void *ptr, std::size_t size) noexcept {
sizedDeleteImpl(ptr, size);
}
#endif // __cpp_sized_deallocation
#endif // __cpp_sized_deallocation
#if __cpp_aligned_new >= 201606
JEMALLOC_ALWAYS_INLINE
void
alignedSizedDeleteImpl(void* ptr, std::size_t size, std::align_val_t alignment)
noexcept {
alignedSizedDeleteImpl(
void *ptr, std::size_t size, std::align_val_t alignment) noexcept {
if (config_debug) {
assert(((size_t)alignment & ((size_t)alignment - 1)) == 0);
}
@ -259,7 +267,7 @@ alignedSizedDeleteImpl(void* ptr, std::size_t size, std::align_val_t alignment)
}
void
operator delete(void* ptr, std::align_val_t) noexcept {
operator delete(void *ptr, std::align_val_t) noexcept {
LOG("core.operator_delete.entry", "ptr: %p", ptr);
je_free_impl(ptr);
@ -268,7 +276,7 @@ operator delete(void* ptr, std::align_val_t) noexcept {
}
void
operator delete[](void* ptr, std::align_val_t) noexcept {
operator delete[](void *ptr, std::align_val_t) noexcept {
LOG("core.operator_delete.entry", "ptr: %p", ptr);
je_free_impl(ptr);
@ -277,7 +285,7 @@ operator delete[](void* ptr, std::align_val_t) noexcept {
}
void
operator delete(void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
operator delete(void *ptr, std::align_val_t, const std::nothrow_t &) noexcept {
LOG("core.operator_delete.entry", "ptr: %p", ptr);
je_free_impl(ptr);
@ -286,7 +294,8 @@ operator delete(void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
}
void
operator delete[](void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
operator delete[](
void *ptr, std::align_val_t, const std::nothrow_t &) noexcept {
LOG("core.operator_delete.entry", "ptr: %p", ptr);
je_free_impl(ptr);
@ -295,14 +304,16 @@ operator delete[](void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
}
void
operator delete(void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
operator delete(
void *ptr, std::size_t size, std::align_val_t alignment) noexcept {
alignedSizedDeleteImpl(ptr, size, alignment);
}
void
operator delete[](void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
operator delete[](
void *ptr, std::size_t size, std::align_val_t alignment) noexcept {
alignedSizedDeleteImpl(ptr, size, alignment);
}
#endif // __cpp_aligned_new
#endif // __cpp_aligned_new
// NOLINTEND(misc-use-anonymous-namespace)

View file

@ -18,10 +18,10 @@ large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
}
void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero) {
size_t ausize;
edata_t *edata;
large_palloc(
tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) {
size_t ausize;
edata_t *edata;
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
assert(!tsdn_null(tsdn) || arena != NULL);
@ -34,8 +34,10 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (likely(!tsdn_null(tsdn))) {
arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
}
if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
arena, usize, alignment, zero)) == NULL) {
if (unlikely(arena == NULL)
|| (edata = arena_extent_alloc_large(
tsdn, arena, usize, alignment, zero))
== NULL) {
return NULL;
}
@ -53,10 +55,10 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
arena_t *arena = arena_get_from_edata(edata);
arena_t *arena = arena_get_from_edata(edata);
ehooks_t *ehooks = arena_get_ehooks(arena);
size_t old_size = edata_size_get(edata);
size_t old_usize = edata_usize_get(edata);
size_t old_size = edata_size_get(edata);
size_t old_usize = edata_usize_get(edata);
assert(old_usize > usize);
@ -80,8 +82,8 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
}
static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
bool zero) {
large_ralloc_no_move_expand(
tsdn_t *tsdn, edata_t *edata, size_t usize, bool zero) {
arena_t *arena = arena_get_from_edata(edata);
size_t old_size = edata_size_get(edata);
@ -112,10 +114,10 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
* offset from the beginning of the extent is a multiple
* of CACHELINE in [0 .. PAGE).
*/
void *zbase = (void *)
((byte_t *)edata_addr_get(edata) + old_usize);
void *zpast = PAGE_ADDR2BASE((void *)((byte_t *)zbase +
PAGE));
void *zbase = (void *)((byte_t *)edata_addr_get(edata)
+ old_usize);
void *zpast = PAGE_ADDR2BASE(
(void *)((byte_t *)zbase + PAGE));
size_t nzero = (byte_t *)zpast - (byte_t *)zbase;
assert(nzero > 0);
memset(zbase, 0, nzero);
@ -134,19 +136,19 @@ large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
assert(oldusize >= SC_LARGE_MINCLASS
&& usize_max >= SC_LARGE_MINCLASS);
assert(oldusize >= SC_LARGE_MINCLASS && usize_max >= SC_LARGE_MINCLASS);
if (usize_max > oldusize) {
/* Attempt to expand the allocation in-place. */
if (!large_ralloc_no_move_expand(tsdn, edata, usize_max,
zero)) {
if (!large_ralloc_no_move_expand(
tsdn, edata, usize_max, zero)) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min > oldusize &&
large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) {
if (usize_min < usize_max && usize_min > oldusize
&& large_ralloc_no_move_expand(
tsdn, edata, usize_min, zero)) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
@ -172,8 +174,8 @@ large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
}
static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero) {
large_ralloc_move_helper(
tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) {
if (alignment <= CACHELINE) {
return large_malloc(tsdn, arena, usize, zero);
}
@ -190,14 +192,13 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
assert(oldusize >= SC_LARGE_MINCLASS
&& usize >= SC_LARGE_MINCLASS);
assert(oldusize >= SC_LARGE_MINCLASS && usize >= SC_LARGE_MINCLASS);
/* Try to avoid moving the allocation. */
if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) {
hook_invoke_expand(hook_args->is_realloc
? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
usize, (uintptr_t)ptr, hook_args->args);
hook_invoke_expand(hook_args->is_realloc ? hook_expand_realloc
: hook_expand_rallocx,
ptr, oldusize, usize, (uintptr_t)ptr, hook_args->args);
return edata_addr_get(edata);
}
@ -206,17 +207,18 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
* different size class. In that case, fall back to allocating new
* space and copying.
*/
void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
zero);
void *ret = large_ralloc_move_helper(
tsdn, arena, usize, alignment, zero);
if (ret == NULL) {
return NULL;
}
hook_invoke_alloc(hook_args->is_realloc
? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
hook_args->args);
hook_invoke_dalloc(hook_args->is_realloc
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
hook_invoke_alloc(
hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx,
ret, (uintptr_t)ret, hook_args->args);
hook_invoke_dalloc(
hook_args->is_realloc ? hook_dalloc_realloc : hook_dalloc_rallocx,
ptr, hook_args->args);
size_t copysize = (usize < oldusize) ? usize : oldusize;
memcpy(ret, edata_addr_get(edata), copysize);
@ -228,8 +230,8 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
* locked indicates whether the arena's large_mtx is currently held.
*/
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
bool locked) {
large_dalloc_prep_impl(
tsdn_t *tsdn, arena_t *arena, edata_t *edata, bool locked) {
if (!locked) {
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
@ -280,16 +282,16 @@ large_salloc(tsdn_t *tsdn, const edata_t *edata) {
}
void
large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
bool reset_recent) {
large_prof_info_get(
tsd_t *tsd, edata_t *edata, prof_info_t *prof_info, bool reset_recent) {
assert(prof_info != NULL);
prof_tctx_t *alloc_tctx = edata_prof_tctx_get(edata);
prof_info->alloc_tctx = alloc_tctx;
if (prof_tctx_is_valid(alloc_tctx)) {
nstime_copy(&prof_info->alloc_time,
edata_prof_alloc_time_get(edata));
nstime_copy(
&prof_info->alloc_time, edata_prof_alloc_time_get(edata));
prof_info->alloc_size = edata_prof_alloc_size_get(edata);
if (reset_recent) {
/*

View file

@ -3,7 +3,7 @@
#include "jemalloc/internal/log.h"
char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
atomic_b_t log_init_done = ATOMIC_INIT(false);
/*
@ -11,7 +11,7 @@ atomic_b_t log_init_done = ATOMIC_INIT(false);
* with a pointer to the first character after the end of the string.
*/
static const char *
log_var_extract_segment(const char* segment_begin) {
log_var_extract_segment(const char *segment_begin) {
const char *end;
for (end = segment_begin; *end != '\0' && *end != '|'; end++) {
}
@ -30,12 +30,12 @@ log_var_matches_segment(const char *segment_begin, const char *segment_end,
if (segment_len == 1 && *segment_begin == '.') {
return true;
}
if (segment_len == log_var_len) {
if (segment_len == log_var_len) {
return strncmp(segment_begin, log_var_begin, segment_len) == 0;
} else if (segment_len < log_var_len) {
return strncmp(segment_begin, log_var_begin, segment_len) == 0
&& log_var_begin[segment_len] == '.';
} else {
} else {
return false;
}
}
@ -61,9 +61,9 @@ log_var_update_state(log_var_t *log_var) {
segment_begin);
assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE);
if (log_var_matches_segment(segment_begin, segment_end,
log_var_begin, log_var_end)) {
atomic_store_u(&log_var->state, LOG_ENABLED,
ATOMIC_RELAXED);
log_var_begin, log_var_end)) {
atomic_store_u(
&log_var->state, LOG_ENABLED, ATOMIC_RELAXED);
return LOG_ENABLED;
}
if (*segment_end == '\0') {

View file

@ -5,63 +5,68 @@
#include "jemalloc/internal/util.h"
#ifdef assert
# undef assert
# undef assert
#endif
#ifdef not_reached
# undef not_reached
# undef not_reached
#endif
#ifdef not_implemented
# undef not_implemented
# undef not_implemented
#endif
#ifdef assert_not_implemented
# undef assert_not_implemented
# undef assert_not_implemented
#endif
/*
* Define simple versions of assertion macros that won't recurse in case
* of assertion failures in malloc_*printf().
*/
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
abort(); \
} \
} while (0)
#define assert(e) \
do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
abort(); \
} \
} while (0)
#define not_reached() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
unreachable(); \
} while (0)
#define not_reached() \
do { \
if (config_debug) { \
malloc_write( \
"<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
unreachable(); \
} while (0)
#define not_implemented() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Not implemented\n"); \
abort(); \
} \
} while (0)
#define not_implemented() \
do { \
if (config_debug) { \
malloc_write("<jemalloc>: Not implemented\n"); \
abort(); \
} \
} while (0)
#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) { \
not_implemented(); \
} \
} while (0)
#define assert_not_implemented(e) \
do { \
if (unlikely(config_debug && !(e))) { \
not_implemented(); \
} \
} while (0)
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
size_t *slen_p);
static char *u2s(
uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p);
#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
size_t *slen_p);
static char *x2s(
uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p);
/******************************************************************************/
@ -71,7 +76,7 @@ wrtmessage(void *cbopaque, const char *s) {
malloc_write_fd(STDERR_FILENO, s, strlen(s));
}
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
/*
* Wrapper around malloc_message() that avoids the need for
@ -93,14 +98,15 @@ malloc_write(const char *s) {
int
buferror(int err, char *buf, size_t buflen) {
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, (DWORD)buflen, NULL);
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, (LPSTR)buf,
(DWORD)buflen, NULL);
return 0;
#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE)
#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) \
&& defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
buf[buflen - 1] = '\0';
}
return 0;
#else
@ -110,9 +116,9 @@ buferror(int err, char *buf, size_t buflen) {
uintmax_t
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
uintmax_t ret, digit;
unsigned b;
bool neg;
uintmax_t ret, digit;
unsigned b;
bool neg;
const char *p, *ns;
p = nptr;
@ -128,7 +134,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
neg = false;
while (true) {
switch (*p) {
case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
case '\t':
case '\n':
case '\v':
case '\f':
case '\r':
case ' ':
p++;
break;
case '-':
@ -142,8 +153,8 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
}
}
/* Get prefix, if any. */
label_prefix:
/* Get prefix, if any. */
label_prefix:
/*
* Note where the first non-whitespace/sign character is so that it is
* possible to tell whether any digits are consumed (e.g., " 0" vs.
@ -152,8 +163,14 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
ns = p;
if (*p == '0') {
switch (p[1]) {
case '0': case '1': case '2': case '3': case '4': case '5':
case '6': case '7':
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
if (b == 0) {
b = 8;
}
@ -161,13 +178,30 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
p++;
}
break;
case 'X': case 'x':
case 'X':
case 'x':
switch (p[2]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
case 'A': case 'B': case 'C': case 'D': case 'E':
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
case 'A':
case 'B':
case 'C':
case 'D':
case 'E':
case 'F':
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'a':
case 'b':
case 'c':
case 'd':
case 'e':
case 'f':
if (b == 0) {
b = 16;
@ -244,9 +278,8 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
} while (x > 0);
break;
case 16: {
const char *digits = (uppercase)
? "0123456789ABCDEF"
: "0123456789abcdef";
const char *digits = (uppercase) ? "0123456789ABCDEF"
: "0123456789abcdef";
do {
i--;
@ -254,7 +287,8 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
x >>= 4;
} while (x > 0);
break;
} default: {
}
default: {
const char *digits = (uppercase)
? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
: "0123456789abcdefghijklmnopqrstuvwxyz";
@ -265,7 +299,8 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
s[i] = digits[x % (uint64_t)base];
x /= (uint64_t)base;
} while (x > 0);
}}
}
}
*slen_p = U2S_BUFSIZE - 1 - i;
return &s[i];
@ -294,7 +329,8 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
(*slen_p)++;
*s = sign;
break;
default: not_reached();
default:
not_reached();
}
return s;
}
@ -325,106 +361,112 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
JEMALLOC_COLD
size_t
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
size_t i;
size_t i;
const char *f;
#define APPEND_C(c) do { \
if (i < size) { \
str[i] = (c); \
} \
i++; \
} while (0)
#define APPEND_S(s, slen) do { \
if (i < size) { \
size_t cpylen = (slen <= size - i) ? slen : size - i; \
memcpy(&str[i], s, cpylen); \
} \
i += slen; \
} while (0)
#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
/* Left padding. */ \
size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
(size_t)width - slen : 0); \
if (!left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) { \
if (pad_zero) { \
APPEND_C('0'); \
} else { \
APPEND_C(' '); \
} \
} \
} \
/* Value. */ \
APPEND_S(s, slen); \
/* Right padding. */ \
if (left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) { \
APPEND_C(' '); \
} \
} \
} while (0)
#define GET_ARG_NUMERIC(val, len) do { \
switch ((unsigned char)len) { \
case '?': \
val = va_arg(ap, int); \
break; \
case '?' | 0x80: \
val = va_arg(ap, unsigned int); \
break; \
case 'l': \
val = va_arg(ap, long); \
break; \
case 'l' | 0x80: \
val = va_arg(ap, unsigned long); \
break; \
case 'q': \
val = va_arg(ap, long long); \
break; \
case 'q' | 0x80: \
val = va_arg(ap, unsigned long long); \
break; \
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
case 'j' | 0x80: \
val = va_arg(ap, uintmax_t); \
break; \
case 't': \
val = va_arg(ap, ptrdiff_t); \
break; \
case 'z': \
val = va_arg(ap, ssize_t); \
break; \
case 'z' | 0x80: \
val = va_arg(ap, size_t); \
break; \
case 'p': /* Synthetic; used for %p. */ \
val = va_arg(ap, uintptr_t); \
break; \
default: \
not_reached(); \
val = 0; \
} \
} while (0)
#define APPEND_C(c) \
do { \
if (i < size) { \
str[i] = (c); \
} \
i++; \
} while (0)
#define APPEND_S(s, slen) \
do { \
if (i < size) { \
size_t cpylen = (slen <= size - i) ? slen : size - i; \
memcpy(&str[i], s, cpylen); \
} \
i += slen; \
} while (0)
#define APPEND_PADDED_S(s, slen, width, left_justify) \
do { \
/* Left padding. */ \
size_t pad_len = (width == -1) \
? 0 \
: ((slen < (size_t)width) ? (size_t)width - slen : 0); \
if (!left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) { \
if (pad_zero) { \
APPEND_C('0'); \
} else { \
APPEND_C(' '); \
} \
} \
} \
/* Value. */ \
APPEND_S(s, slen); \
/* Right padding. */ \
if (left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) { \
APPEND_C(' '); \
} \
} \
} while (0)
#define GET_ARG_NUMERIC(val, len) \
do { \
switch ((unsigned char)len) { \
case '?': \
val = va_arg(ap, int); \
break; \
case '?' | 0x80: \
val = va_arg(ap, unsigned int); \
break; \
case 'l': \
val = va_arg(ap, long); \
break; \
case 'l' | 0x80: \
val = va_arg(ap, unsigned long); \
break; \
case 'q': \
val = va_arg(ap, long long); \
break; \
case 'q' | 0x80: \
val = va_arg(ap, unsigned long long); \
break; \
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
case 'j' | 0x80: \
val = va_arg(ap, uintmax_t); \
break; \
case 't': \
val = va_arg(ap, ptrdiff_t); \
break; \
case 'z': \
val = va_arg(ap, ssize_t); \
break; \
case 'z' | 0x80: \
val = va_arg(ap, size_t); \
break; \
case 'p': /* Synthetic; used for %p. */ \
val = va_arg(ap, uintptr_t); \
break; \
default: \
not_reached(); \
val = 0; \
} \
} while (0)
i = 0;
f = format;
while (true) {
switch (*f) {
case '\0': goto label_out;
case '\0':
goto label_out;
case '%': {
bool alt_form = false;
bool left_justify = false;
bool plus_space = false;
bool plus_plus = false;
int prec = -1;
int width = -1;
bool alt_form = false;
bool left_justify = false;
bool plus_space = false;
bool plus_plus = false;
int prec = -1;
int width = -1;
unsigned char len = '?';
char *s;
size_t slen;
bool pad_zero = false;
char *s;
size_t slen;
bool pad_zero = false;
f++;
/* Flags. */
@ -446,12 +488,13 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
assert(!plus_plus);
plus_plus = true;
break;
default: goto label_width;
default:
goto label_width;
}
f++;
}
/* Width. */
label_width:
/* Width. */
label_width:
switch (*f) {
case '*':
width = va_arg(ap, int);
@ -464,16 +507,24 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
case '0':
pad_zero = true;
JEMALLOC_FALLTHROUGH;
case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9': {
uintmax_t uwidth;
set_errno(0);
uwidth = malloc_strtoumax(f, (char **)&f, 10);
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
assert(uwidth != UINTMAX_MAX
|| get_errno() != ERANGE);
width = (int)uwidth;
break;
} default:
}
default:
break;
}
/* Width/precision separator. */
@ -488,20 +539,29 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
prec = va_arg(ap, int);
f++;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9': {
uintmax_t uprec;
set_errno(0);
uprec = malloc_strtoumax(f, (char **)&f, 10);
assert(uprec != UINTMAX_MAX || get_errno() !=
ERANGE);
assert(uprec != UINTMAX_MAX
|| get_errno() != ERANGE);
prec = (int)uprec;
break;
}
default: break;
default:
break;
}
/* Length. */
label_length:
/* Length. */
label_length:
switch (*f) {
case 'l':
f++;
@ -512,11 +572,15 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
len = 'l';
}
break;
case 'q': case 'j': case 't': case 'z':
case 'q':
case 'j':
case 't':
case 'z':
len = *f;
f++;
break;
default: break;
default:
break;
}
/* Conversion specifier. */
switch (*f) {
@ -525,9 +589,10 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
APPEND_C(*f);
f++;
break;
case 'd': case 'i': {
case 'd':
case 'i': {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
char buf[D2S_BUFSIZE];
/*
* Outputting negative, zero-padded numbers
@ -542,41 +607,48 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
assert(!pad_zero);
GET_ARG_NUMERIC(val, len);
s = d2s(val, (plus_plus ? '+' : (plus_space ?
' ' : '-')), buf, &slen);
s = d2s(val,
(plus_plus ? '+'
: (plus_space ? ' ' : '-')),
buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'o': {
}
case 'o': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[O2S_BUFSIZE];
char buf[O2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = o2s(val, alt_form, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'u': {
}
case 'u': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[U2S_BUFSIZE];
char buf[U2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = u2s(val, 10, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'x': case 'X': {
}
case 'x':
case 'X': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[X2S_BUFSIZE];
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = x2s(val, alt_form, *f == 'X', buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'c': {
}
case 'c': {
unsigned char val;
char buf[2];
char buf[2];
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
@ -586,7 +658,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
APPEND_PADDED_S(buf, 1, width, left_justify);
f++;
break;
} case 's':
}
case 's':
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
@ -596,23 +669,27 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
break;
case 'p': {
uintmax_t val;
char buf[X2S_BUFSIZE];
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, 'p');
s = x2s(val, true, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} default: not_reached();
}
default:
not_reached();
}
break;
} default: {
}
default: {
APPEND_C(*f);
f++;
break;
}}
}
}
}
label_out:
label_out:
if (i < size) {
str[i] = '\0';
} else {
@ -629,7 +706,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
JEMALLOC_FORMAT_PRINTF(3, 4)
size_t
malloc_snprintf(char *str, size_t size, const char *format, ...) {
size_t ret;
size_t ret;
va_list ap;
va_start(ap, format);
@ -640,8 +717,8 @@ malloc_snprintf(char *str, size_t size, const char *format, ...) {
}
void
malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
va_list ap) {
malloc_vcprintf(
write_cb_t *write_cb, void *cbopaque, const char *format, va_list ap) {
char buf[MALLOC_PRINTF_BUFSIZE];
if (write_cb == NULL) {
@ -650,8 +727,8 @@ malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
* function, so use the default one. malloc_write() is an
* inline function, so use malloc_message() directly here.
*/
write_cb = (je_malloc_message != NULL) ? je_malloc_message :
wrtmessage;
write_cb = (je_malloc_message != NULL) ? je_malloc_message
: wrtmessage;
}
malloc_vsnprintf(buf, sizeof(buf), format, ap);

View file

@ -6,7 +6,7 @@
#include "jemalloc/internal/spin.h"
#if defined(_WIN32) && !defined(_CRT_SPINCOUNT)
#define _CRT_SPINCOUNT 4000
# define _CRT_SPINCOUNT 4000
#endif
/*
@ -22,8 +22,8 @@ int64_t opt_mutex_max_spin = 600;
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif
/******************************************************************************/
@ -44,14 +44,14 @@ pthread_create(pthread_t *__restrict thread,
/******************************************************************************/
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(
pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t));
#endif
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
mutex_prof_data_t *data = &mutex->prof_data;
nstime_t before;
nstime_t before;
if (ncpus == 1) {
goto label_spin_done;
@ -61,7 +61,7 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
do {
spin_cpu_spinwait();
if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
&& !malloc_mutex_trylock_final(mutex)) {
&& !malloc_mutex_trylock_final(mutex)) {
data->n_spin_acquired++;
return;
}
@ -77,8 +77,9 @@ label_spin_done:
/* Copy before to after to avoid clock skews. */
nstime_t after;
nstime_copy(&after, &before);
uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
ATOMIC_RELAXED) + 1;
uint32_t n_thds = atomic_fetch_add_u32(
&data->n_waiting_thds, 1, ATOMIC_RELAXED)
+ 1;
/* One last try as above two calls may take quite some cycles. */
if (!malloc_mutex_trylock_final(mutex)) {
atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
@ -137,27 +138,28 @@ mutex_addr_comp(const witness_t *witness1, void *mutex1,
}
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank,
malloc_mutex_lock_order_t lock_order) {
mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
# if _WIN32_WINNT >= 0x0600
InitializeSRWLock(&mutex->lock);
# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT)) {
# else
if (!InitializeCriticalSectionAndSpinCount(
&mutex->lock, _CRT_SPINCOUNT)) {
return true;
}
# endif
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mutex->lock = OS_UNFAIR_LOCK_INIT;
mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
if (postpone_init) {
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
bootstrap_calloc) != 0) {
if (_pthread_mutex_init_calloc_cb(
&mutex->lock, bootstrap_calloc)
!= 0) {
return true;
}
}
@ -201,9 +203,10 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
malloc_mutex_unlock(tsdn, mutex);
#else
if (malloc_mutex_init(mutex, mutex->witness.name,
mutex->witness.rank, mutex->lock_order)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
if (malloc_mutex_init(mutex, mutex->witness.name, mutex->witness.rank,
mutex->lock_order)) {
malloc_printf(
"<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort) {
abort();
@ -217,8 +220,9 @@ malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
bootstrap_calloc) != 0) {
if (_pthread_mutex_init_calloc_cb(
&postponed_mutexes->lock, bootstrap_calloc)
!= 0) {
return true;
}
postponed_mutexes = postponed_mutexes->postponed_next;

View file

@ -5,8 +5,8 @@
#include "jemalloc/internal/assert.h"
#define BILLION UINT64_C(1000000000)
#define MILLION UINT64_C(1000000)
#define BILLION UINT64_C(1000000000)
#define MILLION UINT64_C(1000000)
static void
nstime_set_initialized(nstime_t *time) {
@ -22,8 +22,8 @@ nstime_assert_initialized(const nstime_t *time) {
* Some parts (e.g. stats) rely on memset to zero initialize. Treat
* these as valid initialization.
*/
assert(time->magic == NSTIME_MAGIC ||
(time->magic == 0 && time->ns == 0));
assert(
time->magic == NSTIME_MAGIC || (time->magic == 0 && time->ns == 0));
#endif
}
@ -133,8 +133,10 @@ nstime_isubtract(nstime_t *time, uint64_t subtrahend) {
void
nstime_imultiply(nstime_t *time, uint64_t multiplier) {
nstime_assert_initialized(time);
assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
assert(
(((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << 2)))
== 0)
|| ((time->ns * multiplier) / multiplier == time->ns));
nstime_initialize_operand(time);
time->ns *= multiplier;
@ -178,7 +180,7 @@ nstime_ms_since(const nstime_t *past) {
}
#ifdef _WIN32
# define NSTIME_MONOTONIC false
# define NSTIME_MONOTONIC false
static void
nstime_get(nstime_t *time) {
FILETIME ft;
@ -190,7 +192,7 @@ nstime_get(nstime_t *time) {
nstime_init(time, ticks_100ns * 100);
}
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
# define NSTIME_MONOTONIC true
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
struct timespec ts;
@ -199,7 +201,7 @@ nstime_get(nstime_t *time) {
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
# define NSTIME_MONOTONIC true
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
struct timespec ts;
@ -208,24 +210,24 @@ nstime_get(nstime_t *time) {
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
#elif defined(JEMALLOC_HAVE_CLOCK_GETTIME_NSEC_NP)
# define NSTIME_MONOTONIC true
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
nstime_init(time, clock_gettime_nsec_np(CLOCK_UPTIME_RAW));
}
#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME)
# define NSTIME_MONOTONIC true
# define NSTIME_MONOTONIC true
static void
nstime_get(nstime_t *time) {
static mach_timebase_info_data_t sTimebaseInfo;
if (sTimebaseInfo.denom == 0) {
(void) mach_timebase_info(&sTimebaseInfo);
(void)mach_timebase_info(&sTimebaseInfo);
}
nstime_init(time, mach_absolute_time() * sTimebaseInfo.numer
/ sTimebaseInfo.denom);
nstime_init(time,
mach_absolute_time() * sTimebaseInfo.numer / sTimebaseInfo.denom);
}
#else
# define NSTIME_MONOTONIC false
# define NSTIME_MONOTONIC false
static void
nstime_get(nstime_t *time) {
struct timeval tv;
@ -242,15 +244,13 @@ nstime_monotonic_impl(void) {
}
nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl;
prof_time_res_t opt_prof_time_res =
prof_time_res_default;
prof_time_res_t opt_prof_time_res = prof_time_res_default;
const char *const prof_time_res_mode_names[] = {
"default",
"high",
"default",
"high",
};
static void
nstime_get_realtime(nstime_t *time) {
#if defined(JEMALLOC_HAVE_CLOCK_REALTIME) && !defined(_WIN32)
@ -302,5 +302,3 @@ nstime_prof_init_update(nstime_t *time) {
nstime_init_zero(time);
nstime_prof_update(time);
}

View file

@ -41,8 +41,8 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
}
if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
&stats->pac_stats, stats_mtx)) {
cur_time, pac_oversize_threshold, dirty_decay_ms,
muzzy_decay_ms, &stats->pac_stats, stats_mtx)) {
return true;
}
@ -68,11 +68,11 @@ bool
pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts) {
if (hpa_shard_init(&shard->hpa_shard, &shard->central->hpa, shard->emap,
shard->base, &shard->edata_cache, shard->ind, hpa_opts)) {
shard->base, &shard->edata_cache, shard->ind, hpa_opts)) {
return true;
}
if (sec_init(tsdn, &shard->hpa_sec, shard->base, &shard->hpa_shard.pai,
hpa_sec_opts)) {
hpa_sec_opts)) {
return true;
}
shard->ever_used_hpa = true;
@ -114,16 +114,16 @@ pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) {
static pai_t *
pa_get_pai(pa_shard_t *shard, edata_t *edata) {
return (edata_pai_get(edata) == EXTENT_PAI_PAC
? &shard->pac.pai : &shard->hpa_sec.pai);
return (edata_pai_get(edata) == EXTENT_PAI_PAC ? &shard->pac.pai
: &shard->hpa_sec.pai);
}
edata_t *
pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
bool slab, szind_t szind, bool zero, bool guarded,
bool *deferred_work_generated) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
assert(!guarded || alignment <= PAGE);
edata_t *edata = NULL;
@ -190,8 +190,8 @@ pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
size_t shrink_amount = old_size - new_size;
pai_t *pai = pa_get_pai(shard, edata);
bool error = pai_shrink(tsdn, pai, edata, old_size, new_size,
deferred_work_generated);
bool error = pai_shrink(
tsdn, pai, edata, old_size, new_size, deferred_work_generated);
if (error) {
return true;
}
@ -232,11 +232,11 @@ pa_decay_ms_get(pa_shard_t *shard, extent_state_t state) {
}
void
pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
bool deferral_allowed) {
pa_shard_set_deferral_allowed(
tsdn_t *tsdn, pa_shard_t *shard, bool deferral_allowed) {
if (pa_shard_uses_hpa(shard)) {
hpa_shard_set_deferral_allowed(tsdn, &shard->hpa_shard,
deferral_allowed);
hpa_shard_set_deferral_allowed(
tsdn, &shard->hpa_shard, deferral_allowed);
}
}
@ -260,8 +260,8 @@ pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
}
if (pa_shard_uses_hpa(shard)) {
uint64_t hpa =
pai_time_until_deferred_work(tsdn, &shard->hpa_shard.pai);
uint64_t hpa = pai_time_until_deferred_work(
tsdn, &shard->hpa_shard.pai);
if (hpa < time) {
time = hpa;
}

View file

@ -94,8 +94,8 @@ pa_shard_nmuzzy(pa_shard_t *shard) {
}
void
pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,
size_t *nmuzzy) {
pa_shard_basic_stats_merge(
pa_shard_t *shard, size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
*nactive += pa_shard_nactive(shard);
*ndirty += pa_shard_ndirty(shard);
*nmuzzy += pa_shard_nmuzzy(shard);
@ -122,29 +122,29 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_dirty.npurge,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_dirty.npurge));
&shard->pac.stats->decay_dirty.npurge));
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_dirty.nmadvise,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_dirty.nmadvise));
&shard->pac.stats->decay_dirty.nmadvise));
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_dirty.purged,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_dirty.purged));
&shard->pac.stats->decay_dirty.purged));
/* Muzzy decay stats */
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_muzzy.npurge,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_muzzy.npurge));
&shard->pac.stats->decay_muzzy.npurge));
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_muzzy.nmadvise,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_muzzy.nmadvise));
&shard->pac.stats->decay_muzzy.nmadvise));
locked_inc_u64_unsynchronized(
&pa_shard_stats_out->pac_stats.decay_muzzy.purged,
locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
&shard->pac.stats->decay_muzzy.purged));
&shard->pac.stats->decay_muzzy.purged));
atomic_load_add_store_zu(&pa_shard_stats_out->pac_stats.abandoned_vm,
atomic_load_zu(&shard->pac.stats->abandoned_vm, ATOMIC_RELAXED));
@ -157,8 +157,8 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
retained = ecache_nextents_get(&shard->pac.ecache_retained, i);
dirty_bytes = ecache_nbytes_get(&shard->pac.ecache_dirty, i);
muzzy_bytes = ecache_nbytes_get(&shard->pac.ecache_muzzy, i);
retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained,
i);
retained_bytes = ecache_nbytes_get(
&shard->pac.ecache_retained, i);
estats_out[i].ndirty = dirty;
estats_out[i].nmuzzy = muzzy;

186
src/pac.c
View file

@ -7,18 +7,18 @@
static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
size_t alignment, bool zero, bool guarded, bool frequent_reuse,
bool *deferred_work_generated);
static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated);
static void pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated);
static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated);
static void pac_dalloc_impl(
tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated);
static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
static inline void
pac_decay_data_get(pac_t *pac, extent_state_t state,
decay_t **r_decay, pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) {
switch(state) {
pac_decay_data_get(pac_t *pac, extent_state_t state, decay_t **r_decay,
pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) {
switch (state) {
case extent_state_dirty:
*r_decay = &pac->decay_dirty;
*r_decay_stats = &pac->stats->decay_dirty;
@ -51,7 +51,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
* merging/splitting extents is non-trivial.
*/
if (ecache_init(tsdn, &pac->ecache_dirty, extent_state_dirty, ind,
/* delay_coalesce */ true)) {
/* delay_coalesce */ true)) {
return true;
}
/*
@ -59,7 +59,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
* the critical path much less often than for dirty extents.
*/
if (ecache_init(tsdn, &pac->ecache_muzzy, extent_state_muzzy, ind,
/* delay_coalesce */ false)) {
/* delay_coalesce */ false)) {
return true;
}
/*
@ -68,17 +68,17 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
* coalescing), but also because operations on retained extents are not
* in the critical path.
*/
if (ecache_init(tsdn, &pac->ecache_retained, extent_state_retained,
ind, /* delay_coalesce */ false)) {
if (ecache_init(tsdn, &pac->ecache_retained, extent_state_retained, ind,
/* delay_coalesce */ false)) {
return true;
}
exp_grow_init(&pac->exp_grow);
if (malloc_mutex_init(&pac->grow_mtx, "extent_grow",
WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
return true;
}
atomic_store_zu(&pac->oversize_threshold, pac_oversize_threshold,
ATOMIC_RELAXED);
atomic_store_zu(
&pac->oversize_threshold, pac_oversize_threshold, ATOMIC_RELAXED);
if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
return true;
}
@ -112,7 +112,8 @@ pac_may_have_muzzy(pac_t *pac) {
return pac_decay_ms_get(pac, extent_state_muzzy) != 0;
}
static size_t pac_alloc_retained_batched_size(size_t size) {
static size_t
pac_alloc_retained_batched_size(size_t size) {
if (size > SC_LARGE_MAXCLASS) {
/*
* A valid input with usize SC_LARGE_MAXCLASS could still
@ -124,8 +125,8 @@ static size_t pac_alloc_retained_batched_size(size_t size) {
}
size_t batched_size = sz_s2u_compute_using_delta(size);
size_t next_hugepage_size = HUGEPAGE_CEILING(size);
return batched_size > next_hugepage_size? next_hugepage_size:
batched_size;
return batched_size > next_hugepage_size ? next_hugepage_size
: batched_size;
}
static edata_t *
@ -162,8 +163,8 @@ pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
* limits. This choice should be reevaluated if
* pac_alloc_retained_batched_size is changed to be more aggressive.
*/
if (sz_large_size_classes_disabled() && edata == NULL &&
(maps_coalesce || opt_retain)) {
if (sz_large_size_classes_disabled() && edata == NULL
&& (maps_coalesce || opt_retain)) {
size_t batched_size = pac_alloc_retained_batched_size(size);
/*
* Note that ecache_alloc_grow will try to retrieve virtual
@ -173,12 +174,12 @@ pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
* with opt_retain off.
*/
edata = ecache_alloc_grow(tsdn, pac, ehooks,
&pac->ecache_retained, NULL, batched_size,
alignment, zero, guarded);
&pac->ecache_retained, NULL, batched_size, alignment, zero,
guarded);
if (edata != NULL && batched_size > size) {
edata_t *trail = extent_split_wrapper(tsdn, pac,
ehooks, edata, size, batched_size - size,
edata_t *trail = extent_split_wrapper(tsdn, pac, ehooks,
edata, size, batched_size - size,
/* holding_core_locks */ false);
if (trail == NULL) {
ecache_dalloc(tsdn, pac, ehooks,
@ -203,8 +204,8 @@ pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
}
if (config_stats && newly_mapped_size != 0) {
atomic_fetch_add_zu(&pac->stats->pac_mapped,
newly_mapped_size, ATOMIC_RELAXED);
atomic_fetch_add_zu(
&pac->stats->pac_mapped, newly_mapped_size, ATOMIC_RELAXED);
}
return edata;
@ -217,8 +218,8 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
edata_t *edata;
if (san_bump_enabled() && frequent_reuse) {
edata = san_bump_alloc(tsdn, &pac->sba, pac, ehooks, size,
zero);
edata = san_bump_alloc(
tsdn, &pac->sba, pac, ehooks, size, zero);
} else {
size_t size_with_guards = san_two_side_guarded_sz(size);
/* Alloc a non-guarded extent first.*/
@ -227,12 +228,12 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
if (edata != NULL) {
/* Add guards around it. */
assert(edata_size_get(edata) == size_with_guards);
san_guard_pages_two_sided(tsdn, ehooks, edata,
pac->emap, true);
san_guard_pages_two_sided(
tsdn, ehooks, edata, pac->emap, true);
}
}
assert(edata == NULL || (edata_guarded_get(edata) &&
edata_size_get(edata) == size));
assert(edata == NULL
|| (edata_guarded_get(edata) && edata_size_get(edata) == size));
return edata;
}
@ -241,7 +242,7 @@ static edata_t *
pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
bool zero, bool guarded, bool frequent_reuse,
bool *deferred_work_generated) {
pac_t *pac = (pac_t *)self;
pac_t *pac = (pac_t *)self;
ehooks_t *ehooks = pac_ehooks_get(pac);
edata_t *edata = NULL;
@ -252,13 +253,13 @@ pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
* for such allocations would always return NULL.
* */
if (!guarded || frequent_reuse) {
edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
zero, guarded);
edata = pac_alloc_real(
tsdn, pac, ehooks, size, alignment, zero, guarded);
}
if (edata == NULL && guarded) {
/* No cached guarded extents; creating a new one. */
edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size,
alignment, zero, frequent_reuse);
edata = pac_alloc_new_guarded(
tsdn, pac, ehooks, size, alignment, zero, frequent_reuse);
}
return edata;
@ -267,7 +268,7 @@ pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
static bool
pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
size_t new_size, bool zero, bool *deferred_work_generated) {
pac_t *pac = (pac_t *)self;
pac_t *pac = (pac_t *)self;
ehooks_t *ehooks = pac_ehooks_get(pac);
size_t mapped_add = 0;
@ -296,8 +297,8 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
return true;
}
if (config_stats && mapped_add > 0) {
atomic_fetch_add_zu(&pac->stats->pac_mapped, mapped_add,
ATOMIC_RELAXED);
atomic_fetch_add_zu(
&pac->stats->pac_mapped, mapped_add, ATOMIC_RELAXED);
}
return false;
}
@ -305,7 +306,7 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
static bool
pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
size_t new_size, bool *deferred_work_generated) {
pac_t *pac = (pac_t *)self;
pac_t *pac = (pac_t *)self;
ehooks_t *ehooks = pac_ehooks_get(pac);
size_t shrink_amount = old_size - new_size;
@ -325,9 +326,9 @@ pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
}
static void
pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated) {
pac_t *pac = (pac_t *)self;
pac_dalloc_impl(
tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) {
pac_t *pac = (pac_t *)self;
ehooks_t *ehooks = pac_ehooks_get(pac);
if (edata_guarded_get(edata)) {
@ -344,10 +345,10 @@ pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
* guarded).
*/
if (!edata_slab_get(edata) || !maps_coalesce) {
assert(edata_size_get(edata) >= SC_LARGE_MINCLASS ||
!maps_coalesce);
san_unguard_pages_two_sided(tsdn, ehooks, edata,
pac->emap);
assert(edata_size_get(edata) >= SC_LARGE_MINCLASS
|| !maps_coalesce);
san_unguard_pages_two_sided(
tsdn, ehooks, edata, pac->emap);
}
}
@ -362,8 +363,8 @@ pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) {
/* Use minimal interval if decay is contended. */
return BACKGROUND_THREAD_DEFERRED_MIN;
}
uint64_t result = decay_ns_until_purge(decay, npages,
ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD);
uint64_t result = decay_ns_until_purge(
decay, npages, ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD);
malloc_mutex_unlock(tsdn, &decay->mtx);
return result;
@ -372,18 +373,16 @@ pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) {
static uint64_t
pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
uint64_t time;
pac_t *pac = (pac_t *)self;
pac_t *pac = (pac_t *)self;
time = pac_ns_until_purge(tsdn,
&pac->decay_dirty,
ecache_npages_get(&pac->ecache_dirty));
time = pac_ns_until_purge(
tsdn, &pac->decay_dirty, ecache_npages_get(&pac->ecache_dirty));
if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
return time;
}
uint64_t muzzy = pac_ns_until_purge(tsdn,
&pac->decay_muzzy,
ecache_npages_get(&pac->ecache_muzzy));
uint64_t muzzy = pac_ns_until_purge(
tsdn, &pac->decay_muzzy, ecache_npages_get(&pac->ecache_muzzy));
if (muzzy < time) {
time = muzzy;
}
@ -391,8 +390,8 @@ pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
}
bool
pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
size_t *new_limit) {
pac_retain_grow_limit_get_set(
tsdn_t *tsdn, pac_t *pac, size_t *old_limit, size_t *new_limit) {
pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
if (new_limit != NULL) {
size_t limit = *new_limit;
@ -418,15 +417,15 @@ static size_t
pac_stash_decayed(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
size_t npages_limit, size_t npages_decay_max,
edata_list_inactive_t *result) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
ehooks_t *ehooks = pac_ehooks_get(pac);
/* Stash extents according to npages_limit. */
size_t nstashed = 0;
while (nstashed < npages_decay_max) {
edata_t *edata = ecache_evict(tsdn, pac, ehooks, ecache,
npages_limit);
edata_t *edata = ecache_evict(
tsdn, pac, ehooks, ecache, npages_limit);
if (edata == NULL) {
break;
}
@ -443,8 +442,8 @@ decay_with_process_madvise(edata_list_inactive_t *decay_extents) {
#ifndef JEMALLOC_HAVE_PROCESS_MADVISE
return true;
#else
assert(opt_process_madvise_max_batch <=
PROCESS_MADVISE_MAX_BATCH_LIMIT);
assert(
opt_process_madvise_max_batch <= PROCESS_MADVISE_MAX_BATCH_LIMIT);
size_t len = opt_process_madvise_max_batch;
VARIABLE_ARRAY(struct iovec, vec, len);
@ -458,8 +457,8 @@ decay_with_process_madvise(edata_list_inactive_t *decay_extents) {
total_bytes += pages_bytes;
cur++;
if (cur == len) {
bool err = pages_purge_process_madvise(vec, len,
total_bytes);
bool err = pages_purge_process_madvise(
vec, len, total_bytes);
if (err) {
return true;
}
@ -489,14 +488,14 @@ pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
bool try_muzzy = !fully_decay
&& pac_decay_ms_get(pac, extent_state_muzzy) != 0;
bool purge_to_retained = !try_muzzy ||
ecache->state == extent_state_muzzy;
bool purge_to_retained = !try_muzzy
|| ecache->state == extent_state_muzzy;
/*
* Attempt process_madvise only if 1) enabled, 2) purging to retained,
* and 3) not using custom hooks.
*/
bool try_process_madvise = (opt_process_madvise_max_batch > 0) &&
purge_to_retained && ehooks_dalloc_will_fail(ehooks);
bool try_process_madvise = (opt_process_madvise_max_batch > 0)
&& purge_to_retained && ehooks_dalloc_will_fail(ehooks);
bool already_purged;
if (try_process_madvise) {
@ -511,8 +510,8 @@ pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
already_purged = false;
}
for (edata_t *edata = edata_list_inactive_first(decay_extents); edata !=
NULL; edata = edata_list_inactive_first(decay_extents)) {
for (edata_t *edata = edata_list_inactive_first(decay_extents);
edata != NULL; edata = edata_list_inactive_first(decay_extents)) {
edata_list_inactive_remove(decay_extents, edata);
size_t size = edata_size_get(edata);
@ -524,8 +523,8 @@ pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
switch (ecache->state) {
case extent_state_dirty:
if (try_muzzy) {
err = extent_purge_lazy_wrapper(tsdn, ehooks,
edata, /* offset */ 0, size);
err = extent_purge_lazy_wrapper(
tsdn, ehooks, edata, /* offset */ 0, size);
if (!err) {
ecache_dalloc(tsdn, pac, ehooks,
&pac->ecache_muzzy, edata);
@ -535,8 +534,8 @@ pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
JEMALLOC_FALLTHROUGH;
case extent_state_muzzy:
if (already_purged) {
extent_dalloc_wrapper_purged(tsdn, pac, ehooks,
edata);
extent_dalloc_wrapper_purged(
tsdn, pac, ehooks, edata);
} else {
extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
}
@ -578,8 +577,8 @@ static void
pac_decay_to_limit(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
size_t npages_limit, size_t npages_decay_max) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 1);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 1);
if (decay->purging || npages_decay_max == 0) {
return;
@ -589,8 +588,8 @@ pac_decay_to_limit(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
edata_list_inactive_t decay_extents;
edata_list_inactive_init(&decay_extents);
size_t npurge = pac_stash_decayed(tsdn, pac, ecache, npages_limit,
npages_decay_max, &decay_extents);
size_t npurge = pac_stash_decayed(
tsdn, pac, ecache, npages_limit, npages_decay_max, &decay_extents);
if (npurge != 0) {
size_t npurged = pac_decay_stashed(tsdn, pac, decay,
decay_stats, ecache, fully_decay, &decay_extents);
@ -611,8 +610,8 @@ pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
static void
pac_decay_try_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
pac_decay_stats_t *decay_stats, ecache_t *ecache,
size_t current_npages, size_t npages_limit) {
pac_decay_stats_t *decay_stats, ecache_t *ecache, size_t current_npages,
size_t npages_limit) {
if (current_npages > npages_limit) {
pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache,
/* fully_decay */ false, npages_limit,
@ -647,8 +646,8 @@ pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
nstime_t time;
nstime_init_update(&time);
size_t npages_current = ecache_npages_get(ecache);
bool epoch_advanced = decay_maybe_advance_epoch(decay, &time,
npages_current);
bool epoch_advanced = decay_maybe_advance_epoch(
decay, &time, npages_current);
if (eagerness == PAC_PURGE_ALWAYS
|| (epoch_advanced && eagerness == PAC_PURGE_ON_EPOCH_ADVANCE)) {
size_t npages_limit = decay_npages_limit_get(decay);
@ -662,9 +661,9 @@ pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
bool
pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
decay_t *decay;
decay_t *decay;
pac_decay_stats_t *decay_stats;
ecache_t *ecache;
ecache_t *ecache;
pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
if (!decay_ms_valid(decay_ms)) {
@ -691,9 +690,9 @@ pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
ssize_t
pac_decay_ms_get(pac_t *pac, extent_state_t state) {
decay_t *decay;
decay_t *decay;
pac_decay_stats_t *decay_stats;
ecache_t *ecache;
ecache_t *ecache;
pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
return decay_ms_read(decay);
}
@ -722,9 +721,10 @@ pac_destroy(tsdn_t *tsdn, pac_t *pac) {
* dss-based extents for later reuse.
*/
ehooks_t *ehooks = pac_ehooks_get(pac);
edata_t *edata;
while ((edata = ecache_evict(tsdn, pac, ehooks,
&pac->ecache_retained, 0)) != NULL) {
edata_t *edata;
while (
(edata = ecache_evict(tsdn, pac, ehooks, &pac->ecache_retained, 0))
!= NULL) {
extent_destroy_wrapper(tsdn, pac, ehooks, edata);
}
}

View file

@ -8,46 +8,42 @@
#include "jemalloc/internal/malloc_io.h"
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
#ifdef __FreeBSD__
#include <vm/vm_param.h>
#endif
# include <sys/sysctl.h>
# ifdef __FreeBSD__
# include <vm/vm_param.h>
# endif
#endif
#ifdef __NetBSD__
#include <sys/bitops.h> /* ilog2 */
# include <sys/bitops.h> /* ilog2 */
#endif
#ifdef JEMALLOC_HAVE_VM_MAKE_TAG
#define PAGES_FD_TAG VM_MAKE_TAG(254U)
# define PAGES_FD_TAG VM_MAKE_TAG(254U)
#else
#define PAGES_FD_TAG -1
# define PAGES_FD_TAG -1
#endif
#if defined(JEMALLOC_HAVE_PRCTL) && defined(JEMALLOC_PAGEID)
#include <sys/prctl.h>
#ifndef PR_SET_VMA
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
#endif
# include <sys/prctl.h>
# ifndef PR_SET_VMA
# define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
# endif
#endif
/******************************************************************************/
/* Data. */
/* Actual operating system page size, detected during bootstrap, <= PAGE. */
size_t os_page;
size_t os_page;
#ifndef _WIN32
# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
# define PAGES_PROT_DECOMMIT (PROT_NONE)
static int mmap_flags;
# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
# define PAGES_PROT_DECOMMIT (PROT_NONE)
static int mmap_flags;
#endif
static bool os_overcommits;
static bool os_overcommits;
const char *const thp_mode_names[] = {
"default",
"always",
"never",
"not supported"
};
"default", "always", "never", "not supported"};
thp_mode_t opt_thp = THP_MODE_DEFAULT;
thp_mode_t init_system_thp_mode;
@ -66,15 +62,16 @@ static int madvise_dont_need_zeros_is_faulty = -1;
*
* [1]: https://patchwork.kernel.org/patch/10576637/
*/
static int madvise_MADV_DONTNEED_zeroes_pages(void)
{
static int
madvise_MADV_DONTNEED_zeroes_pages(void) {
size_t size = PAGE;
void * addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
malloc_write("<jemalloc>: Cannot allocate memory for "
malloc_write(
"<jemalloc>: Cannot allocate memory for "
"MADV_DONTNEED check\n");
if (opt_abort) {
abort();
@ -94,7 +91,8 @@ static int madvise_MADV_DONTNEED_zeroes_pages(void)
}
if (munmap(addr, size) != 0) {
malloc_write("<jemalloc>: Cannot deallocate memory for "
malloc_write(
"<jemalloc>: Cannot deallocate memory for "
"MADV_DONTNEED check\n");
if (opt_abort) {
abort();
@ -106,18 +104,18 @@ static int madvise_MADV_DONTNEED_zeroes_pages(void)
#endif
#ifdef JEMALLOC_PAGEID
static int os_page_id(void *addr, size_t size, const char *name)
{
#ifdef JEMALLOC_HAVE_PRCTL
static int
os_page_id(void *addr, size_t size, const char *name) {
# ifdef JEMALLOC_HAVE_PRCTL
/*
* While parsing `/proc/<pid>/maps` file, the block could appear as
* 7f4836000000-7f4836800000 rw-p 00000000 00:00 0 [anon:jemalloc_pg_overcommit]`
*/
return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (uintptr_t)addr, size,
(uintptr_t)name);
#else
# else
return 0;
#endif
# endif
}
#endif
@ -156,7 +154,7 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
*/
{
int flags = mmap_flags;
#ifdef __NetBSD__
# ifdef __NetBSD__
/*
* On NetBSD PAGE for a platform is defined to the
* maximum page size of all machine architectures
@ -167,7 +165,7 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
unsigned int a = ilog2(MAX(alignment, PAGE));
flags |= MAP_ALIGNED(a);
}
#endif
# endif
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
ret = mmap(addr, size, prot, flags, PAGES_FD_TAG, 0);
@ -184,8 +182,8 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
ret = NULL;
}
#endif
assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
ret == addr));
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
#ifdef JEMALLOC_PAGEID
int n = os_page_id(ret, size,
os_overcommits ? "jemalloc_pg_overcommit" : "jemalloc_pg");
@ -195,8 +193,8 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
}
static void *
os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
bool *commit) {
os_pages_trim(
void *addr, size_t alloc_size, size_t leadsize, size_t size, bool *commit) {
void *ret = (void *)((byte_t *)addr + leadsize);
assert(alloc_size >= leadsize + size);
@ -237,13 +235,15 @@ os_pages_unmap(void *addr, size_t size) {
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
malloc_printf(
"<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
#else
"munmap"
#endif
"(): %s\n", buf);
"(): %s\n",
buf);
if (opt_abort) {
abort();
}
@ -350,13 +350,14 @@ os_pages_commit(void *addr, size_t size, bool commit) {
assert(PAGE_CEILING(size) == size);
#ifdef _WIN32
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
return (commit
? (addr != VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE))
: (!VirtualFree(addr, size, MEM_DECOMMIT)));
#else
{
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
PAGES_FD_TAG, 0);
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
void *result = mmap(
addr, size, prot, mmap_flags | MAP_FIXED, PAGES_FD_TAG, 0);
if (result == MAP_FAILED) {
return true;
}
@ -395,8 +396,8 @@ pages_decommit(void *addr, size_t size) {
void
pages_mark_guards(void *head, void *tail) {
assert(head != NULL || tail != NULL);
assert(head == NULL || tail == NULL ||
(uintptr_t)head < (uintptr_t)tail);
assert(
head == NULL || tail == NULL || (uintptr_t)head < (uintptr_t)tail);
#ifdef JEMALLOC_HAVE_MPROTECT
if (head != NULL) {
mprotect(head, PAGE, PROT_NONE);
@ -418,13 +419,12 @@ pages_mark_guards(void *head, void *tail) {
void
pages_unmark_guards(void *head, void *tail) {
assert(head != NULL || tail != NULL);
assert(head == NULL || tail == NULL ||
(uintptr_t)head < (uintptr_t)tail);
assert(
head == NULL || tail == NULL || (uintptr_t)head < (uintptr_t)tail);
#ifdef JEMALLOC_HAVE_MPROTECT
bool head_and_tail = (head != NULL) && (tail != NULL);
size_t range = head_and_tail ?
(uintptr_t)tail - (uintptr_t)head + PAGE :
SIZE_T_MAX;
bool head_and_tail = (head != NULL) && (tail != NULL);
size_t range = head_and_tail ? (uintptr_t)tail - (uintptr_t)head + PAGE
: SIZE_T_MAX;
/*
* The amount of work that the kernel does in mprotect depends on the
* range argument. SC_LARGE_MINCLASS is an arbitrary threshold chosen
@ -473,17 +473,18 @@ pages_purge_lazy(void *addr, size_t size) {
return false;
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
return (madvise(addr, size,
# ifdef MADV_FREE
MADV_FREE
# else
JEMALLOC_MADV_FREE
# endif
) != 0);
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
!defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
# ifdef MADV_FREE
MADV_FREE
# else
JEMALLOC_MADV_FREE
# endif
)
!= 0);
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) \
&& !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
return (madvise(addr, size, MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
!defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) \
&& !defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
return (posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#else
not_reached();
@ -499,14 +500,14 @@ pages_purge_forced(void *addr, size_t size) {
return true;
}
#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
return (unlikely(madvise_dont_need_zeros_is_faulty) ||
madvise(addr, size, MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
return (unlikely(madvise_dont_need_zeros_is_faulty) ||
posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) \
&& defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
return (unlikely(madvise_dont_need_zeros_is_faulty)
|| madvise(addr, size, MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) \
&& defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
return (unlikely(madvise_dont_need_zeros_is_faulty)
|| posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_MAPS_COALESCE)
/* Try to overlay a new demand-zeroed mapping. */
return pages_commit(addr, size);
@ -579,13 +580,13 @@ pages_collapse(void *addr, size_t size) {
* means we can't call pages_collapse on freshly mapped memory region.
* See madvise(2) man page for more details.
*/
#if defined(JEMALLOC_HAVE_MADVISE_COLLAPSE) && \
(defined(MADV_COLLAPSE) || defined(JEMALLOC_MADV_COLLAPSE))
# if defined(MADV_COLLAPSE)
#if defined(JEMALLOC_HAVE_MADVISE_COLLAPSE) \
&& (defined(MADV_COLLAPSE) || defined(JEMALLOC_MADV_COLLAPSE))
# if defined(MADV_COLLAPSE)
return (madvise(addr, size, MADV_COLLAPSE) != 0);
# elif defined(JEMALLOC_MADV_COLLAPSE)
# elif defined(JEMALLOC_MADV_COLLAPSE)
return (madvise(addr, size, JEMALLOC_MADV_COLLAPSE) != 0);
# endif
# endif
#else
return true;
#endif
@ -618,8 +619,8 @@ pages_dodump(void *addr, size_t size) {
}
#ifdef JEMALLOC_HAVE_PROCESS_MADVISE
#include <sys/mman.h>
#include <sys/syscall.h>
# include <sys/mman.h>
# include <sys/syscall.h>
static int pidfd;
static bool
@ -640,15 +641,16 @@ init_process_madvise(void) {
return false;
}
#ifdef SYS_process_madvise
#define JE_SYS_PROCESS_MADVISE_NR SYS_process_madvise
#else
#define JE_SYS_PROCESS_MADVISE_NR EXPERIMENTAL_SYS_PROCESS_MADVISE_NR
#endif
# ifdef SYS_process_madvise
# define JE_SYS_PROCESS_MADVISE_NR SYS_process_madvise
# else
# define JE_SYS_PROCESS_MADVISE_NR \
EXPERIMENTAL_SYS_PROCESS_MADVISE_NR
# endif
static bool
pages_purge_process_madvise_impl(void *vec, size_t vec_len,
size_t total_bytes) {
pages_purge_process_madvise_impl(
void *vec, size_t vec_len, size_t total_bytes) {
size_t purged_bytes = (size_t)syscall(JE_SYS_PROCESS_MADVISE_NR, pidfd,
(struct iovec *)vec, vec_len, MADV_DONTNEED, 0);
@ -663,8 +665,8 @@ init_process_madvise(void) {
}
static bool
pages_purge_process_madvise_impl(void *vec, size_t vec_len,
size_t total_bytes) {
pages_purge_process_madvise_impl(
void *vec, size_t vec_len, size_t total_bytes) {
not_reached();
return true;
}
@ -700,11 +702,11 @@ os_page_detect(void) {
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
os_overcommits_sysctl(void) {
int vm_overcommit;
int vm_overcommit;
size_t sz;
sz = sizeof(vm_overcommit);
#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT)
# if defined(__FreeBSD__) && defined(VM_OVERCOMMIT)
int mib[2];
mib[0] = CTL_VM;
@ -712,11 +714,11 @@ os_overcommits_sysctl(void) {
if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) {
return false; /* Error. */
}
#else
# else
if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
return false; /* Error. */
}
#endif
# endif
return ((vm_overcommit & 0x3) == 0);
}
@ -730,17 +732,18 @@ os_overcommits_sysctl(void) {
*/
static bool
os_overcommits_proc(void) {
int fd;
int fd;
char buf[1];
#if defined(O_CLOEXEC)
fd = malloc_open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#else
# if defined(O_CLOEXEC)
fd = malloc_open(
"/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
# else
fd = malloc_open("/proc/sys/vm/overcommit_memory", O_RDONLY);
if (fd != -1) {
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
}
#endif
# endif
if (fd == -1) {
return false; /* Error. */
@ -763,20 +766,20 @@ os_overcommits_proc(void) {
#endif
void
pages_set_thp_state (void *ptr, size_t size) {
pages_set_thp_state(void *ptr, size_t size) {
if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) {
return;
}
assert(opt_thp != thp_mode_not_supported &&
init_system_thp_mode != thp_mode_not_supported);
assert(opt_thp != thp_mode_not_supported
&& init_system_thp_mode != thp_mode_not_supported);
if (opt_thp == thp_mode_always
&& init_system_thp_mode != thp_mode_never) {
assert(init_system_thp_mode == thp_mode_default);
pages_huge_unaligned(ptr, size);
} else if (opt_thp == thp_mode_never) {
assert(init_system_thp_mode == thp_mode_default ||
init_system_thp_mode == thp_mode_always);
assert(init_system_thp_mode == thp_mode_default
|| init_system_thp_mode == thp_mode_always);
pages_nohuge_unaligned(ptr, size);
}
}
@ -794,7 +797,7 @@ init_thp_state(void) {
static const char sys_state_madvise[] = "always [madvise] never\n";
static const char sys_state_always[] = "[always] madvise never\n";
static const char sys_state_never[] = "always madvise [never]\n";
char buf[sizeof(sys_state_madvise)];
char buf[sizeof(sys_state_madvise)];
int fd = malloc_open(
"/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
@ -839,10 +842,13 @@ pages_boot(void) {
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
if (!opt_trust_madvise) {
madvise_dont_need_zeros_is_faulty = !madvise_MADV_DONTNEED_zeroes_pages();
madvise_dont_need_zeros_is_faulty =
!madvise_MADV_DONTNEED_zeroes_pages();
if (madvise_dont_need_zeros_is_faulty) {
malloc_write("<jemalloc>: MADV_DONTNEED does not work (memset will be used instead)\n");
malloc_write("<jemalloc>: (This is the expected behaviour if you are running under QEMU)\n");
malloc_write(
"<jemalloc>: MADV_DONTNEED does not work (memset will be used instead)\n");
malloc_write(
"<jemalloc>: (This is the expected behaviour if you are running under QEMU)\n");
}
} else {
/* In case opt_trust_madvise is disable,
@ -859,11 +865,11 @@ pages_boot(void) {
os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
os_overcommits = os_overcommits_proc();
# ifdef MAP_NORESERVE
# ifdef MAP_NORESERVE
if (os_overcommits) {
mmap_flags |= MAP_NORESERVE;
}
# endif
# endif
#elif defined(__NetBSD__)
os_overcommits = true;
#else
@ -879,8 +885,9 @@ pages_boot(void) {
#else
/* Detect lazy purge runtime support. */
if (pages_can_purge_lazy) {
bool committed = false;
void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed);
bool committed = false;
void *madv_free_page = os_pages_map(
NULL, PAGE, PAGE, &committed);
if (madv_free_page == NULL) {
return true;
}

View file

@ -6,7 +6,7 @@ pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
edata_list_active_t *results, bool frequent_reuse,
bool *deferred_work_generated) {
for (size_t i = 0; i < nallocs; i++) {
bool deferred_by_alloc = false;
bool deferred_by_alloc = false;
edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
/* zero */ false, /* guarded */ false, frequent_reuse,
&deferred_by_alloc);
@ -20,8 +20,8 @@ pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
}
void
pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
edata_list_active_t *list, bool *deferred_work_generated) {
pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
bool *deferred_work_generated) {
edata_t *edata;
while ((edata = edata_list_active_first(list)) != NULL) {
bool deferred_by_dalloc = false;

View file

@ -12,7 +12,7 @@ void
peak_event_update(tsd_t *tsd) {
uint64_t alloc = tsd_thread_allocated_get(tsd);
uint64_t dalloc = tsd_thread_deallocated_get(tsd);
peak_t *peak = tsd_peakp_get(tsd);
peak_t *peak = tsd_peakp_get(tsd);
peak_update(peak, alloc, dalloc);
}
@ -32,7 +32,7 @@ void
peak_event_zero(tsd_t *tsd) {
uint64_t alloc = tsd_thread_allocated_get(tsd);
uint64_t dalloc = tsd_thread_deallocated_get(tsd);
peak_t *peak = tsd_peakp_get(tsd);
peak_t *peak = tsd_peakp_get(tsd);
peak_set_zero(peak, alloc, dalloc);
}
@ -65,8 +65,8 @@ peak_event_enabled(void) {
/* Handles alloc and dalloc */
te_base_cb_t peak_te_handler = {
.enabled = &peak_event_enabled,
.new_event_wait = &peak_event_new_event_wait,
.postponed_event_wait = &peak_event_postponed_event_wait,
.event_handler = &peak_event_handler,
.enabled = &peak_event_enabled,
.new_event_wait = &peak_event_new_event_wait,
.postponed_event_wait = &peak_event_postponed_event_wait,
.event_handler = &peak_event_handler,
};

View file

@ -24,21 +24,21 @@
/* Data. */
bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
unsigned opt_prof_bt_max = PROF_BT_MAX_DEFAULT;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_leak_error = false;
bool opt_prof_accum = false;
bool opt_prof_pid_namespace = false;
char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
bool opt_prof_sys_thread_name = false;
bool opt_prof_unbias = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_leak_error = false;
bool opt_prof_accum = false;
bool opt_prof_pid_namespace = false;
char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
bool opt_prof_sys_thread_name = false;
bool opt_prof_unbias = true;
/* Accessed via prof_sample_event_handler(). */
static counter_accum_t prof_idump_accumulated;
@ -47,28 +47,28 @@ static counter_accum_t prof_idump_accumulated;
* Initialized as opt_prof_active, and accessed via
* prof_active_[gs]et{_unlocked,}().
*/
bool prof_active_state;
bool prof_active_state;
static malloc_mutex_t prof_active_mtx;
/*
* Initialized as opt_prof_thread_active_init, and accessed via
* prof_thread_active_init_[gs]et().
*/
static bool prof_thread_active_init;
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;
/*
* Initialized as opt_prof_gdump, and accessed via
* prof_gdump_[gs]et{_unlocked,}().
*/
bool prof_gdump_val;
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;
uint64_t prof_interval = 0;
size_t lg_prof_sample;
static uint64_t next_thr_uid;
static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;
/* Do not dump any profiles until bootstrapping is complete. */
@ -113,16 +113,16 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx) {
}
void
prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
size_t usize, prof_tctx_t *tctx) {
prof_malloc_sample_object(
tsd_t *tsd, const void *ptr, size_t size, size_t usize, prof_tctx_t *tctx) {
cassert(config_prof);
if (opt_prof_sys_thread_name) {
prof_sys_thread_name_fetch(tsd);
}
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
ptr);
edata_t *edata = emap_edata_lookup(
tsd_tsdn(tsd), &arena_emap_global, ptr);
prof_info_set(tsd, edata, tctx, size);
szind_t szind = sz_size2index(usize);
@ -173,8 +173,8 @@ prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
}
void
prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
prof_info_t *prof_info) {
prof_free_sampled_object(
tsd_t *tsd, const void *ptr, size_t usize, prof_info_t *prof_info) {
cassert(config_prof);
assert(prof_info != NULL);
@ -279,10 +279,12 @@ prof_sample_new_event_wait(tsd_t *tsd) {
* otherwise bytes_until_sample would be 0 if u is exactly 1.0.
*/
uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
double u = (r == 0U) ? 1.0 : (double)((long double)r *
(1.0L/9007199254740992.0L));
return (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
double u = (r == 0U)
? 1.0
: (double)((long double)r * (1.0L / 9007199254740992.0L));
return (uint64_t)(log(u)
/ log(
1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ (uint64_t)1U;
#else
not_reached();
@ -322,9 +324,9 @@ prof_sample_enabled(void) {
}
te_base_cb_t prof_sample_te_handler = {
.enabled = &prof_sample_enabled,
.new_event_wait = &prof_sample_new_event_wait,
/*
.enabled = &prof_sample_enabled,
.new_event_wait = &prof_sample_new_event_wait,
/*
* The postponed wait time for prof sample event is computed as if we
* want a new wait time (i.e. as if the event were triggered). If we
* instead postpone to the immediate next allocation, like how we're
@ -332,8 +334,8 @@ te_base_cb_t prof_sample_te_handler = {
* the allocation immediately following a reentrancy always comes from
* the same stack trace.
*/
.postponed_event_wait = &prof_sample_new_event_wait,
.event_handler = &prof_sample_event_handler,
.postponed_event_wait = &prof_sample_new_event_wait,
.event_handler = &prof_sample_event_handler,
};
static void
@ -361,7 +363,7 @@ prof_idump_accum_init(void) {
void
prof_idump(tsdn_t *tsdn) {
tsd_t *tsd;
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
@ -400,7 +402,7 @@ prof_mdump(tsd_t *tsd, const char *filename) {
void
prof_gdump(tsdn_t *tsdn) {
tsd_t *tsd;
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
@ -447,7 +449,7 @@ prof_tdata_t *
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
bool active = tdata->active;
bool active = tdata->active;
/* Keep a local copy of the thread name, before detaching. */
prof_thread_name_assert(tdata);
@ -455,8 +457,8 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
strncpy(thread_name, tdata->thread_name, PROF_THREAD_NAME_MAX_LEN);
prof_tdata_detach(tsd, tdata);
return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
active);
return prof_tdata_init_impl(
tsd, thr_uid, thr_discrim, thread_name, active);
}
void
@ -595,8 +597,8 @@ prof_backtrace_hook_set(prof_backtrace_hook_t hook) {
prof_backtrace_hook_t
prof_backtrace_hook_get(void) {
return (prof_backtrace_hook_t)atomic_load_p(&prof_backtrace_hook,
ATOMIC_ACQUIRE);
return (prof_backtrace_hook_t)atomic_load_p(
&prof_backtrace_hook, ATOMIC_ACQUIRE);
}
void
@ -606,8 +608,7 @@ prof_dump_hook_set(prof_dump_hook_t hook) {
prof_dump_hook_t
prof_dump_hook_get(void) {
return (prof_dump_hook_t)atomic_load_p(&prof_dump_hook,
ATOMIC_ACQUIRE);
return (prof_dump_hook_t)atomic_load_p(&prof_dump_hook, ATOMIC_ACQUIRE);
}
void
@ -617,8 +618,8 @@ prof_sample_hook_set(prof_sample_hook_t hook) {
prof_sample_hook_t
prof_sample_hook_get(void) {
return (prof_sample_hook_t)atomic_load_p(&prof_sample_hook,
ATOMIC_ACQUIRE);
return (prof_sample_hook_t)atomic_load_p(
&prof_sample_hook, ATOMIC_ACQUIRE);
}
void
@ -628,16 +629,16 @@ prof_sample_free_hook_set(prof_sample_free_hook_t hook) {
prof_sample_free_hook_t
prof_sample_free_hook_get(void) {
return (prof_sample_free_hook_t)atomic_load_p(&prof_sample_free_hook,
ATOMIC_ACQUIRE);
return (prof_sample_free_hook_t)atomic_load_p(
&prof_sample_free_hook, ATOMIC_ACQUIRE);
}
void
prof_boot0(void) {
cassert(config_prof);
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
sizeof(PROF_PREFIX_DEFAULT));
memcpy(
opt_prof_prefix, PROF_PREFIX_DEFAULT, sizeof(PROF_PREFIX_DEFAULT));
}
void
@ -661,8 +662,8 @@ prof_boot1(void) {
opt_prof_gdump = false;
} else if (opt_prof) {
if (opt_lg_prof_interval >= 0) {
prof_interval = (((uint64_t)1U) <<
opt_lg_prof_interval);
prof_interval = (((uint64_t)1U)
<< opt_lg_prof_interval);
}
}
}
@ -676,41 +677,40 @@ prof_boot2(tsd_t *tsd, base_t *base) {
* stats when opt_prof is false.
*/
if (malloc_mutex_init(&prof_active_mtx, "prof_active",
WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_thread_active_init_mtx,
"prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
malloc_mutex_rank_exclusive)) {
"prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_stats_mtx, "prof_stats",
WITNESS_RANK_PROF_STATS, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_STATS, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_dump_filename_mtx,
"prof_dump_filename", WITNESS_RANK_PROF_DUMP_FILENAME,
malloc_mutex_rank_exclusive)) {
if (malloc_mutex_init(&prof_dump_filename_mtx, "prof_dump_filename",
WITNESS_RANK_PROF_DUMP_FILENAME, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
return true;
}
@ -730,8 +730,8 @@ prof_boot2(tsd_t *tsd, base_t *base) {
return true;
}
if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
atexit(prof_fdump) != 0) {
if (opt_prof_final && opt_prof_prefix[0] != '\0'
&& atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
if (opt_abort) {
abort();
@ -755,8 +755,8 @@ prof_boot2(tsd_t *tsd, base_t *base) {
}
for (unsigned i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
WITNESS_RANK_PROF_GCTX,
malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_GCTX,
malloc_mutex_rank_exclusive)) {
return true;
}
}
@ -768,8 +768,8 @@ prof_boot2(tsd_t *tsd, base_t *base) {
}
for (unsigned i = 0; i < PROF_NTDATA_LOCKS; i++) {
if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
WITNESS_RANK_PROF_TDATA,
malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_TDATA,
malloc_mutex_rank_exclusive)) {
return true;
}
}
@ -820,8 +820,8 @@ prof_postfork_parent(tsdn_t *tsdn) {
if (config_prof && opt_prof) {
unsigned i;
malloc_mutex_postfork_parent(tsdn,
&prof_thread_active_init_mtx);
malloc_mutex_postfork_parent(
tsdn, &prof_thread_active_init_mtx);
malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_stats_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_recent_alloc_mtx);

View file

@ -36,7 +36,7 @@ malloc_mutex_t prof_dump_mtx;
* and destroying mutexes causes complications for systems that allocate when
* creating/destroying mutexes.
*/
malloc_mutex_t *gctx_locks;
malloc_mutex_t *gctx_locks;
static atomic_u_t cum_gctxs; /* Atomic counter. */
/*
@ -69,33 +69,32 @@ static int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
uint64_t a_thr_uid = a->thr_uid;
uint64_t b_thr_uid = b->thr_uid;
int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
if (ret == 0) {
uint64_t a_thr_discrim = a->thr_discrim;
uint64_t b_thr_discrim = b->thr_discrim;
ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
b_thr_discrim);
ret = (a_thr_discrim > b_thr_discrim)
- (a_thr_discrim < b_thr_discrim);
if (ret == 0) {
uint64_t a_tctx_uid = a->tctx_uid;
uint64_t b_tctx_uid = b->tctx_uid;
ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
b_tctx_uid);
ret = (a_tctx_uid > b_tctx_uid)
- (a_tctx_uid < b_tctx_uid);
}
}
return ret;
}
/* NOLINTBEGIN(performance-no-int-to-ptr) */
rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
tctx_link, prof_tctx_comp)
/* NOLINTEND(performance-no-int-to-ptr) */
rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, tctx_link,
prof_tctx_comp)
/* NOLINTEND(performance-no-int-to-ptr) */
static int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
static int prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
unsigned a_len = a->bt.len;
unsigned b_len = b->bt.len;
unsigned comp_len = (a_len < b_len) ? a_len : b_len;
int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
if (ret == 0) {
ret = (a_len > b_len) - (a_len < b_len);
}
@ -105,11 +104,10 @@ prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
/* NOLINTBEGIN(performance-no-int-to-ptr) */
rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
prof_gctx_comp)
/* NOLINTEND(performance-no-int-to-ptr) */
/* NOLINTEND(performance-no-int-to-ptr) */
static int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
int ret;
static int prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
int ret;
uint64_t a_uid = a->thr_uid;
uint64_t b_uid = b->thr_uid;
@ -126,12 +124,11 @@ prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
/* NOLINTBEGIN(performance-no-int-to-ptr) */
rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
prof_tdata_comp)
/* NOLINTEND(performance-no-int-to-ptr) */
/* NOLINTEND(performance-no-int-to-ptr) */
/******************************************************************************/
/******************************************************************************/
static malloc_mutex_t *
prof_gctx_mutex_choose(void) {
static malloc_mutex_t *prof_gctx_mutex_choose(void) {
unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
@ -145,8 +142,8 @@ prof_tdata_mutex_choose(uint64_t thr_uid) {
bool
prof_data_init(tsd_t *tsd) {
tdata_tree_new(&tdatas);
return ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS,
prof_bt_hash, prof_bt_keycomp);
return ckh_new(
tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp);
}
static void
@ -195,8 +192,8 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
*/
size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
true);
sz_size2index(size), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
if (gctx == NULL) {
return NULL;
}
@ -215,8 +212,7 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
}
static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self,
prof_gctx_t *gctx) {
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx) {
cassert(config_prof);
/*
@ -267,12 +263,12 @@ static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
union {
prof_gctx_t *p;
void *v;
prof_gctx_t *p;
void *v;
} gctx, tgctx;
union {
prof_bt_t *p;
void *v;
prof_bt_t *p;
void *v;
} btkey;
bool new_gctx;
@ -316,8 +312,8 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
if (tgctx.v != NULL) {
/* Lost race to insert. */
idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
true);
idalloctm(
tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, true);
}
}
prof_leave(tsd, tdata);
@ -331,11 +327,11 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
union {
prof_tctx_t *p;
void *v;
prof_tctx_t *p;
void *v;
} ret;
prof_tdata_t *tdata;
bool not_found;
bool not_found;
cassert(config_prof);
@ -349,16 +345,16 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
}
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
if (not_found) {
void *btkey;
void *btkey;
prof_gctx_t *gctx;
bool new_gctx, error;
bool new_gctx, error;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
&new_gctx)) {
if (prof_lookup_global(
tsd, bt, tdata, &btkey, &gctx, &new_gctx)) {
return NULL;
}
@ -403,8 +399,8 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
/* Used in unit tests. */
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
void *arg) {
prof_tdata_count_iter(
prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, void *arg) {
size_t *tdata_count = (size_t *)arg;
(*tdata_count)++;
@ -415,13 +411,13 @@ prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
/* Used in unit tests. */
size_t
prof_tdata_count(void) {
size_t tdata_count = 0;
size_t tdata_count = 0;
tsdn_t *tsdn;
tsdn = tsdn_fetch();
malloc_mutex_lock(tsdn, &tdatas_mtx);
tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
(void *)&tdata_count);
tdata_tree_iter(
&tdatas, NULL, prof_tdata_count_iter, (void *)&tdata_count);
malloc_mutex_unlock(tsdn, &tdatas_mtx);
return tdata_count;
@ -430,8 +426,8 @@ prof_tdata_count(void) {
/* Used in unit tests. */
size_t
prof_bt_count(void) {
size_t bt_count;
tsd_t *tsd;
size_t bt_count;
tsd_t *tsd;
prof_tdata_t *tdata;
tsd = tsd_fetch();
@ -477,10 +473,10 @@ prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name) {
JEMALLOC_FORMAT_PRINTF(3, 4)
static void
prof_dump_printf(write_cb_t *prof_dump_write, void *cbopaque,
const char *format, ...) {
prof_dump_printf(
write_cb_t *prof_dump_write, void *cbopaque, const char *format, ...) {
va_list ap;
char buf[PROF_PRINTF_BUFSIZE];
char buf[PROF_PRINTF_BUFSIZE];
va_start(ap, format);
malloc_vsnprintf(buf, sizeof(buf), format, ap);
@ -509,7 +505,8 @@ prof_double_uint64_cast(double d) {
}
#endif
void prof_unbias_map_init(void) {
void
prof_unbias_map_init(void) {
/* See the comment in prof_sample_new_event_wait */
#ifdef JEMALLOC_PROF
for (szind_t i = 0; i < SC_NSIZES; i++) {
@ -621,8 +618,8 @@ prof_do_unbias(uint64_t c_out_shifted_i, uint64_t s_out_i, uint64_t *r_c_in,
}
static void
prof_dump_print_cnts(write_cb_t *prof_dump_write, void *cbopaque,
const prof_cnt_t *cnts) {
prof_dump_print_cnts(
write_cb_t *prof_dump_write, void *cbopaque, const prof_cnt_t *cnts) {
uint64_t curobjs;
uint64_t curbytes;
uint64_t accumobjs;
@ -639,8 +636,8 @@ prof_dump_print_cnts(write_cb_t *prof_dump_write, void *cbopaque,
accumbytes = cnts->accumbytes;
}
prof_dump_printf(prof_dump_write, cbopaque,
"%"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]",
curobjs, curbytes, accumobjs, accumbytes);
"%" FMTu64 ": %" FMTu64 " [%" FMTu64 ": %" FMTu64 "]", curobjs,
curbytes, accumobjs, accumbytes);
}
static void
@ -660,11 +657,11 @@ prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
tdata->cnt_summed.curobjs_shifted_unbiased
+= tctx->dump_cnts.curobjs_shifted_unbiased;
tdata->cnt_summed.curobjs_shifted_unbiased +=
tctx->dump_cnts.curobjs_shifted_unbiased;
tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
tdata->cnt_summed.curbytes_unbiased
+= tctx->dump_cnts.curbytes_unbiased;
tdata->cnt_summed.curbytes_unbiased +=
tctx->dump_cnts.curbytes_unbiased;
if (opt_prof_accum) {
tdata->cnt_summed.accumobjs +=
tctx->dump_cnts.accumobjs;
@ -687,17 +684,17 @@ prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
malloc_mutex_assert_owner(tsdn, gctx->lock);
gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
gctx->cnt_summed.curobjs_shifted_unbiased
+= tctx->dump_cnts.curobjs_shifted_unbiased;
gctx->cnt_summed.curobjs_shifted_unbiased +=
tctx->dump_cnts.curobjs_shifted_unbiased;
gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
gctx->cnt_summed.curbytes_unbiased += tctx->dump_cnts.curbytes_unbiased;
if (opt_prof_accum) {
gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
gctx->cnt_summed.accumobjs_shifted_unbiased
+= tctx->dump_cnts.accumobjs_shifted_unbiased;
gctx->cnt_summed.accumobjs_shifted_unbiased +=
tctx->dump_cnts.accumobjs_shifted_unbiased;
gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
gctx->cnt_summed.accumbytes_unbiased
+= tctx->dump_cnts.accumbytes_unbiased;
gctx->cnt_summed.accumbytes_unbiased +=
tctx->dump_cnts.accumbytes_unbiased;
}
}
@ -725,9 +722,9 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
typedef struct prof_dump_iter_arg_s prof_dump_iter_arg_t;
struct prof_dump_iter_arg_s {
tsdn_t *tsdn;
tsdn_t *tsdn;
write_cb_t *prof_dump_write;
void *cbopaque;
void *cbopaque;
};
static prof_tctx_t *
@ -743,9 +740,9 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
case prof_tctx_state_dumping:
case prof_tctx_state_purgatory:
prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
" t%"FMTu64": ", tctx->thr_uid);
prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
&tctx->dump_cnts);
" t%" FMTu64 ": ", tctx->thr_uid);
prof_dump_print_cnts(
arg->prof_dump_write, arg->cbopaque, &tctx->dump_cnts);
arg->prof_dump_write(arg->cbopaque, "\n");
break;
default:
@ -756,7 +753,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
tsdn_t *tsdn = (tsdn_t *)arg;
tsdn_t *tsdn = (tsdn_t *)arg;
prof_tctx_t *ret;
malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
@ -811,8 +808,8 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
prof_gctx_merge_iter_arg_t *arg = (prof_gctx_merge_iter_arg_t *)opaque;
malloc_mutex_lock(arg->tsdn, gctx->lock);
tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
(void *)arg->tsdn);
tctx_tree_iter(
&gctx->tctxs, NULL, prof_tctx_merge_iter, (void *)arg->tsdn);
if (gctx->cnt_summed.curobjs != 0) {
(*arg->leak_ngctx)++;
}
@ -824,7 +821,7 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
static void
prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
prof_tdata_t *tdata = prof_tdata_get(tsd, false);
prof_gctx_t *gctx;
prof_gctx_t *gctx;
/*
* Standard tree iteration won't work here, because as soon as we
@ -840,15 +837,14 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
next = NULL;
do {
prof_tctx_t *to_destroy =
tctx_tree_iter(&gctx->tctxs, next,
prof_tctx_finish_iter,
prof_tctx_t *to_destroy = tctx_tree_iter(
&gctx->tctxs, next, prof_tctx_finish_iter,
(void *)tsd_tsdn(tsd));
if (to_destroy != NULL) {
next = tctx_tree_next(&gctx->tctxs,
to_destroy);
tctx_tree_remove(&gctx->tctxs,
to_destroy);
next = tctx_tree_next(
&gctx->tctxs, to_destroy);
tctx_tree_remove(
&gctx->tctxs, to_destroy);
idalloctm(tsd_tsdn(tsd), to_destroy,
NULL, NULL, true, true);
} else {
@ -869,41 +865,41 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
typedef struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg_t;
struct prof_tdata_merge_iter_arg_s {
tsdn_t *tsdn;
tsdn_t *tsdn;
prof_cnt_t *cnt_all;
};
static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
void *opaque) {
prof_tdata_merge_iter_arg_t *arg =
(prof_tdata_merge_iter_arg_t *)opaque;
prof_tdata_merge_iter(
prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, void *opaque) {
prof_tdata_merge_iter_arg_t *arg = (prof_tdata_merge_iter_arg_t *)
opaque;
malloc_mutex_lock(arg->tsdn, tdata->lock);
if (!tdata->expired) {
size_t tabind;
union {
prof_tctx_t *p;
void *v;
prof_tctx_t *p;
void *v;
} tctx;
tdata->dumping = true;
memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
&tctx.v);) {
for (tabind = 0;
!ckh_iter(&tdata->bt2tctx, &tabind, NULL, &tctx.v);) {
prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
}
arg->cnt_all->curobjs += tdata->cnt_summed.curobjs;
arg->cnt_all->curobjs_shifted_unbiased
+= tdata->cnt_summed.curobjs_shifted_unbiased;
arg->cnt_all->curobjs_shifted_unbiased +=
tdata->cnt_summed.curobjs_shifted_unbiased;
arg->cnt_all->curbytes += tdata->cnt_summed.curbytes;
arg->cnt_all->curbytes_unbiased
+= tdata->cnt_summed.curbytes_unbiased;
arg->cnt_all->curbytes_unbiased +=
tdata->cnt_summed.curbytes_unbiased;
if (opt_prof_accum) {
arg->cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
arg->cnt_all->accumobjs_shifted_unbiased
+= tdata->cnt_summed.accumobjs_shifted_unbiased;
arg->cnt_all->accumobjs_shifted_unbiased +=
tdata->cnt_summed.accumobjs_shifted_unbiased;
arg->cnt_all->accumbytes +=
tdata->cnt_summed.accumbytes;
arg->cnt_all->accumbytes_unbiased +=
@ -918,17 +914,17 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
}
static prof_tdata_t *
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
void *opaque) {
prof_tdata_dump_iter(
prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, void *opaque) {
if (!tdata->dumping) {
return NULL;
}
prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
prof_dump_printf(arg->prof_dump_write, arg->cbopaque, " t%"FMTu64": ",
tdata->thr_uid);
prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
&tdata->cnt_summed);
prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
" t%" FMTu64 ": ", tdata->thr_uid);
prof_dump_print_cnts(
arg->prof_dump_write, arg->cbopaque, &tdata->cnt_summed);
if (!prof_thread_name_empty(tdata)) {
arg->prof_dump_write(arg->cbopaque, " ");
arg->prof_dump_write(arg->cbopaque, tdata->thread_name);
@ -940,7 +936,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
static void
prof_dump_header(prof_dump_iter_arg_t *arg, const prof_cnt_t *cnt_all) {
prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
"heap_v2/%"FMTu64"\n t*: ", ((uint64_t)1U << lg_prof_sample));
"heap_v2/%" FMTu64 "\n t*: ", ((uint64_t)1U << lg_prof_sample));
prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, cnt_all);
arg->prof_dump_write(arg->cbopaque, "\n");
@ -956,8 +952,8 @@ prof_dump_gctx(prof_dump_iter_arg_t *arg, prof_gctx_t *gctx,
malloc_mutex_assert_owner(arg->tsdn, gctx->lock);
/* Avoid dumping such gctx's that have no useful data. */
if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
(opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0)
|| (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
assert(gctx->cnt_summed.curobjs == 0);
assert(gctx->cnt_summed.curbytes == 0);
/*
@ -976,12 +972,12 @@ prof_dump_gctx(prof_dump_iter_arg_t *arg, prof_gctx_t *gctx,
arg->prof_dump_write(arg->cbopaque, "@");
for (unsigned i = 0; i < bt->len; i++) {
prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
" %#"FMTxPTR, (uintptr_t)bt->vec[i]);
" %#" FMTxPTR, (uintptr_t)bt->vec[i]);
}
arg->prof_dump_write(arg->cbopaque, "\n t*: ");
prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
&gctx->cnt_summed);
prof_dump_print_cnts(
arg->prof_dump_write, arg->cbopaque, &gctx->cnt_summed);
arg->prof_dump_write(arg->cbopaque, "\n");
tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, arg);
@ -1002,18 +998,21 @@ prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx) {
*/
if (cnt_all->curbytes != 0) {
double sample_period = (double)((uint64_t)1 << lg_prof_sample);
double ratio = (((double)cnt_all->curbytes) /
(double)cnt_all->curobjs) / sample_period;
double scale_factor = 1.0 / (1.0 - exp(-ratio));
uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
* scale_factor);
uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
scale_factor);
double ratio = (((double)cnt_all->curbytes)
/ (double)cnt_all->curobjs)
/ sample_period;
double scale_factor = 1.0 / (1.0 - exp(-ratio));
uint64_t curbytes = (uint64_t)round(
((double)cnt_all->curbytes) * scale_factor);
uint64_t curobjs = (uint64_t)round(
((double)cnt_all->curobjs) * scale_factor);
malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
" byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf(
"<jemalloc>: Leak approximation summary: ~%" FMTu64
" byte%s, ~%" FMTu64 " object%s, >= %zu context%s\n",
curbytes, (curbytes != 1) ? "s" : "", curobjs,
(curobjs != 1) ? "s" : "", leak_ngctx,
(leak_ngctx != 1) ? "s" : "");
malloc_printf(
"<jemalloc>: Run jeprof on dump output for leak detail\n");
if (opt_prof_leak_error) {
@ -1044,8 +1043,8 @@ prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, prof_cnt_t *cnt_all,
size_t *leak_ngctx, prof_gctx_tree_t *gctxs) {
size_t tabind;
union {
prof_gctx_t *p;
void *v;
prof_gctx_t *p;
void *v;
} gctx;
prof_enter(tsd, tdata);
@ -1064,19 +1063,19 @@ prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, prof_cnt_t *cnt_all,
* stats and merge them into the associated gctx's.
*/
memset(cnt_all, 0, sizeof(prof_cnt_t));
prof_tdata_merge_iter_arg_t prof_tdata_merge_iter_arg = {tsd_tsdn(tsd),
cnt_all};
prof_tdata_merge_iter_arg_t prof_tdata_merge_iter_arg = {
tsd_tsdn(tsd), cnt_all};
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
&prof_tdata_merge_iter_arg);
tdata_tree_iter(
&tdatas, NULL, prof_tdata_merge_iter, &prof_tdata_merge_iter_arg);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
/* Merge tctx stats into gctx's. */
*leak_ngctx = 0;
prof_gctx_merge_iter_arg_t prof_gctx_merge_iter_arg = {tsd_tsdn(tsd),
leak_ngctx};
gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
&prof_gctx_merge_iter_arg);
prof_gctx_merge_iter_arg_t prof_gctx_merge_iter_arg = {
tsd_tsdn(tsd), leak_ngctx};
gctx_tree_iter(
gctxs, NULL, prof_gctx_merge_iter, &prof_gctx_merge_iter_arg);
prof_leave(tsd, tdata);
}
@ -1085,12 +1084,12 @@ void
prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
prof_tdata_t *tdata, bool leakcheck) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_dump_mtx);
prof_cnt_t cnt_all;
size_t leak_ngctx;
prof_cnt_t cnt_all;
size_t leak_ngctx;
prof_gctx_tree_t gctxs;
prof_dump_prep(tsd, tdata, &cnt_all, &leak_ngctx, &gctxs);
prof_dump_iter_arg_t prof_dump_iter_arg = {tsd_tsdn(tsd),
prof_dump_write, cbopaque};
prof_dump_iter_arg_t prof_dump_iter_arg = {
tsd_tsdn(tsd), prof_dump_write, cbopaque};
prof_dump_header(&prof_dump_iter_arg, &cnt_all);
gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, &prof_dump_iter_arg);
prof_gctx_finish(tsd, &gctxs);
@ -1102,12 +1101,12 @@ prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
/* Used in unit tests. */
void
prof_cnt_all(prof_cnt_t *cnt_all) {
tsd_t *tsd = tsd_fetch();
tsd_t *tsd = tsd_fetch();
prof_tdata_t *tdata = prof_tdata_get(tsd, false);
if (tdata == NULL) {
memset(cnt_all, 0, sizeof(prof_cnt_t));
} else {
size_t leak_ngctx;
size_t leak_ngctx;
prof_gctx_tree_t gctxs;
prof_dump_prep(tsd, tdata, cnt_all, &leak_ngctx, &gctxs);
prof_gctx_finish(tsd, &gctxs);
@ -1148,8 +1147,8 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
/* Initialize an empty cache for this thread. */
size_t tdata_sz = ALIGNMENT_CEILING(sizeof(prof_tdata_t), QUANTUM);
size_t total_sz = tdata_sz + sizeof(void *) * opt_prof_bt_max;
tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd),
total_sz, sz_size2index(total_sz), false, NULL, true,
tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), total_sz,
sz_size2index(total_sz), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
if (tdata == NULL) {
return NULL;
@ -1170,7 +1169,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
prof_thread_name_assert(tdata);
if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp)) {
prof_bt_keycomp)) {
idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
return NULL;
}
@ -1201,16 +1200,16 @@ prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
}
static bool
prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
bool even_if_attached) {
prof_tdata_should_destroy(
tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached) {
malloc_mutex_assert_owner(tsdn, tdata->lock);
return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
}
static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached) {
prof_tdata_destroy_locked(
tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tdata->lock);
@ -1234,8 +1233,8 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
if (tdata->attached) {
destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
true);
destroy_tdata = prof_tdata_should_destroy(
tsd_tsdn(tsd), tdata, true);
/*
* Only detach if !destroy_tdata, because detaching would allow
* another thread to win the race to destroy tdata.
@ -1270,8 +1269,8 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
}
static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
void *arg) {
prof_tdata_reset_iter(
prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, void *arg) {
tsdn_t *tsdn = (tsdn_t *)arg;
return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
@ -1291,8 +1290,8 @@ prof_reset(tsd_t *tsd, size_t lg_sample) {
next = NULL;
do {
prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
prof_tdata_reset_iter, (void *)tsd);
prof_tdata_t *to_destroy = tdata_tree_iter(
&tdatas, next, prof_tdata_reset_iter, (void *)tsd);
if (to_destroy != NULL) {
next = tdata_tree_next(&tdatas, to_destroy);
prof_tdata_destroy_locked(tsd, to_destroy, false);
@ -1355,8 +1354,8 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
prof_tdata_t *tdata = tctx->tdata;
tctx->tdata = NULL;
ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
bool destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd),
tdata, false);
bool destroy_tdata = prof_tdata_should_destroy(
tsd_tsdn(tsd), tdata, false);
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
if (destroy_tdata) {
prof_tdata_destroy(tsd, tdata, false);

View file

@ -12,7 +12,7 @@
#include "jemalloc/internal/prof_log.h"
#include "jemalloc/internal/prof_sys.h"
bool opt_prof_log = false;
bool opt_prof_log = false;
typedef enum prof_logging_state_e prof_logging_state_t;
enum prof_logging_state_e {
prof_logging_state_stopped,
@ -32,8 +32,8 @@ static bool prof_log_dummy = false;
/* Incremented for every log file that is output. */
static uint64_t log_seq = 0;
static char log_filename[
/* Minimize memory bloat for non-prof builds. */
static char log_filename[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
@ -51,8 +51,8 @@ typedef struct prof_bt_node_s prof_bt_node_t;
struct prof_bt_node_s {
prof_bt_node_t *next;
size_t index;
prof_bt_t bt;
size_t index;
prof_bt_t bt;
/* Variable size backtrace vector pointed to by bt. */
void *vec[1];
};
@ -61,8 +61,8 @@ typedef struct prof_thr_node_s prof_thr_node_t;
struct prof_thr_node_s {
prof_thr_node_t *next;
size_t index;
uint64_t thr_uid;
size_t index;
uint64_t thr_uid;
/* Variable size based on thr_name_sz. */
char name[1];
};
@ -91,15 +91,15 @@ struct prof_alloc_node_s {
* These are the backtraces and threads that have already been logged by an
* allocation.
*/
static bool log_tables_initialized = false;
static bool log_tables_initialized = false;
static ckh_t log_bt_node_set;
static ckh_t log_thr_node_set;
/* Store linked lists for logged data. */
static prof_bt_node_t *log_bt_first = NULL;
static prof_bt_node_t *log_bt_last = NULL;
static prof_thr_node_t *log_thr_first = NULL;
static prof_thr_node_t *log_thr_last = NULL;
static prof_bt_node_t *log_bt_first = NULL;
static prof_bt_node_t *log_bt_last = NULL;
static prof_thr_node_t *log_thr_first = NULL;
static prof_thr_node_t *log_thr_last = NULL;
static prof_alloc_node_t *log_alloc_first = NULL;
static prof_alloc_node_t *log_alloc_last = NULL;
@ -131,12 +131,12 @@ prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
/* See if this backtrace is already cached in the table. */
if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
(void **)(&node), NULL)) {
size_t sz = offsetof(prof_bt_node_t, vec) +
(bt->len * sizeof(void *));
prof_bt_node_t *new_node = (prof_bt_node_t *)
iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
true, arena_get(TSDN_NULL, 0, true), true);
(void **)(&node), NULL)) {
size_t sz = offsetof(prof_bt_node_t, vec)
+ (bt->len * sizeof(void *));
prof_bt_node_t *new_node = (prof_bt_node_t *)iallocztm(
tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
if (log_bt_first == NULL) {
log_bt_first = new_node;
log_bt_last = new_node;
@ -174,11 +174,11 @@ prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
/* See if this thread is already cached in the table. */
if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
(void **)(&node), NULL)) {
(void **)(&node), NULL)) {
size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
prof_thr_node_t *new_node = (prof_thr_node_t *)
iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
true, arena_get(TSDN_NULL, 0, true), true);
prof_thr_node_t *new_node = (prof_thr_node_t *)iallocztm(
tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
if (log_thr_first == NULL) {
log_thr_first = new_node;
log_thr_last = new_node;
@ -225,9 +225,9 @@ prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
if (!log_tables_initialized) {
bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
prof_bt_node_hash, prof_bt_node_keycomp);
prof_bt_node_hash, prof_bt_node_keycomp);
bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
prof_thr_node_hash, prof_thr_node_keycomp);
prof_thr_node_hash, prof_thr_node_keycomp);
if (err1 || err2) {
goto label_done;
}
@ -238,9 +238,9 @@ prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
nstime_t free_time;
nstime_prof_init_update(&free_time);
size_t sz = sizeof(prof_alloc_node_t);
prof_alloc_node_t *new_node = (prof_alloc_node_t *)
iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
size_t sz = sizeof(prof_alloc_node_t);
prof_alloc_node_t *new_node = (prof_alloc_node_t *)iallocztm(
tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
const char *prod_thr_name = tctx->tdata->thread_name;
@ -256,10 +256,10 @@ prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
prof_bt_t *prod_bt = &tctx->gctx->bt;
new_node->next = NULL;
new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
prod_thr_name);
new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
cons_thr_name);
new_node->alloc_thr_ind = prof_log_thr_index(
tsd, tctx->tdata->thr_uid, prod_thr_name);
new_node->free_thr_ind = prof_log_thr_index(
tsd, cons_tdata->thr_uid, cons_thr_name);
new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
new_node->alloc_time_ns = nstime_ns(&alloc_time);
@ -288,8 +288,8 @@ static bool
prof_bt_node_keycomp(const void *k1, const void *k2) {
const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
return prof_bt_keycomp((void *)(&bt_node1->bt),
(void *)(&bt_node2->bt));
return prof_bt_keycomp(
(void *)(&bt_node1->bt), (void *)(&bt_node2->bt));
}
static void
@ -309,7 +309,7 @@ prof_thr_node_keycomp(const void *k1, const void *k2) {
size_t
prof_log_bt_count(void) {
cassert(config_prof);
size_t cnt = 0;
size_t cnt = 0;
prof_bt_node_t *node = log_bt_first;
while (node != NULL) {
cnt++;
@ -322,7 +322,7 @@ prof_log_bt_count(void) {
size_t
prof_log_alloc_count(void) {
cassert(config_prof);
size_t cnt = 0;
size_t cnt = 0;
prof_alloc_node_t *node = log_alloc_first;
while (node != NULL) {
cnt++;
@ -335,7 +335,7 @@ prof_log_alloc_count(void) {
size_t
prof_log_thr_count(void) {
cassert(config_prof);
size_t cnt = 0;
size_t cnt = 0;
prof_thr_node_t *node = log_thr_first;
while (node != NULL) {
cnt++;
@ -374,7 +374,6 @@ prof_log_rep_check(void) {
size_t thr_count = prof_log_thr_count();
size_t alloc_count = prof_log_alloc_count();
if (prof_logging_state == prof_logging_state_stopped) {
if (bt_count != 0 || thr_count != 0 || alloc_count || 0) {
return true;
@ -435,7 +434,8 @@ prof_log_start(tsdn_t *tsdn, const char *filename) {
if (!prof_log_atexit_called) {
prof_log_atexit_called = true;
if (atexit(prof_log_stop_final) != 0) {
malloc_write("<jemalloc>: Error in atexit() "
malloc_write(
"<jemalloc>: Error in atexit() "
"for logging\n");
if (opt_abort) {
abort();
@ -469,14 +469,14 @@ label_done:
}
struct prof_emitter_cb_arg_s {
int fd;
int fd;
ssize_t ret;
};
static void
prof_emitter_write_cb(void *opaque, const char *to_write) {
struct prof_emitter_cb_arg_s *arg =
(struct prof_emitter_cb_arg_s *)opaque;
struct prof_emitter_cb_arg_s *arg = (struct prof_emitter_cb_arg_s *)
opaque;
size_t bytes = strlen(to_write);
if (prof_log_dummy) {
return;
@ -501,8 +501,8 @@ prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
char *thr_name = thr_node->name;
emitter_json_kv(emitter, "thr_name", emitter_type_string,
&thr_name);
emitter_json_kv(
emitter, "thr_name", emitter_type_string, &thr_name);
emitter_json_object_end(emitter);
thr_old_node = thr_node;
@ -521,7 +521,7 @@ prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
* Calculate how many hex digits we need: twice number of bytes, two for
* "0x", and then one more for terminating '\0'.
*/
char buf[2 * sizeof(intptr_t) + 3];
char buf[2 * sizeof(intptr_t) + 3];
size_t buf_sz = sizeof(buf);
while (bt_node != NULL) {
emitter_json_array_begin(emitter);
@ -529,8 +529,8 @@ prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
for (i = 0; i < bt_node->bt.len; i++) {
malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
char *trace_str = buf;
emitter_json_value(emitter, emitter_type_string,
&trace_str);
emitter_json_value(
emitter, emitter_type_string, &trace_str);
}
emitter_json_array_end(emitter);
@ -561,21 +561,21 @@ prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
emitter_json_kv(emitter, "free_trace", emitter_type_size,
&alloc_node->free_bt_ind);
emitter_json_kv(emitter, "alloc_timestamp",
emitter_type_uint64, &alloc_node->alloc_time_ns);
emitter_json_kv(emitter, "alloc_timestamp", emitter_type_uint64,
&alloc_node->alloc_time_ns);
emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
&alloc_node->free_time_ns);
emitter_json_kv(emitter, "usize", emitter_type_uint64,
&alloc_node->usize);
emitter_json_kv(
emitter, "usize", emitter_type_uint64, &alloc_node->usize);
emitter_json_object_end(emitter);
alloc_old_node = alloc_node;
alloc_node = alloc_node->next;
idalloctm(tsd_tsdn(tsd), alloc_old_node, NULL, NULL, true,
true);
idalloctm(
tsd_tsdn(tsd), alloc_old_node, NULL, NULL, true, true);
}
emitter_json_array_end(emitter);
}
@ -591,15 +591,14 @@ prof_log_emit_metadata(emitter_t *emitter) {
emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);
char *vers = JEMALLOC_VERSION;
emitter_json_kv(emitter, "version",
emitter_type_string, &vers);
emitter_json_kv(emitter, "version", emitter_type_string, &vers);
emitter_json_kv(emitter, "lg_sample_rate",
emitter_type_int, &lg_prof_sample);
emitter_json_kv(
emitter, "lg_sample_rate", emitter_type_int, &lg_prof_sample);
const char *res_type = prof_time_res_mode_names[opt_prof_time_res];
emitter_json_kv(emitter, "prof_time_resolution", emitter_type_string,
&res_type);
emitter_json_kv(
emitter, "prof_time_resolution", emitter_type_string, &res_type);
int pid = prof_getpid();
emitter_json_kv(emitter, "pid", emitter_type_int, &pid);
@ -632,7 +631,6 @@ prof_log_stop(tsdn_t *tsdn) {
prof_logging_state = prof_logging_state_dumping;
malloc_mutex_unlock(tsdn, &log_mtx);
emitter_t emitter;
/* Create a file. */
@ -645,8 +643,10 @@ prof_log_stop(tsdn_t *tsdn) {
}
if (fd == -1) {
malloc_printf("<jemalloc>: creat() for log file \"%s\" "
" failed with %d\n", log_filename, errno);
malloc_printf(
"<jemalloc>: creat() for log file \"%s\" "
" failed with %d\n",
log_filename, errno);
if (opt_abort) {
abort();
}
@ -659,8 +659,8 @@ prof_log_stop(tsdn_t *tsdn) {
buf_writer_t buf_writer;
buf_writer_init(tsdn, &buf_writer, prof_emitter_write_cb, &arg, NULL,
PROF_LOG_STOP_BUFSIZE);
emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
&buf_writer);
emitter_init(
&emitter, emitter_output_json_compact, buf_writer_cb, &buf_writer);
emitter_begin(&emitter);
prof_log_emit_metadata(&emitter);
@ -701,8 +701,8 @@ JEMALLOC_COLD
bool
prof_log_init(tsd_t *tsd) {
cassert(config_prof);
if (malloc_mutex_init(&log_mtx, "prof_log",
WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
if (malloc_mutex_init(&log_mtx, "prof_log", WITNESS_RANK_PROF_LOG,
malloc_mutex_rank_exclusive)) {
return true;
}

View file

@ -7,18 +7,18 @@
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_recent.h"
ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT;
malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */
ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT;
malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */
static atomic_zd_t prof_recent_alloc_max;
static ssize_t prof_recent_alloc_count = 0;
static ssize_t prof_recent_alloc_count = 0;
prof_recent_list_t prof_recent_alloc_list;
malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. */
static void
prof_recent_alloc_max_init(void) {
atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max,
ATOMIC_RELAXED);
atomic_store_zd(
&prof_recent_alloc_max, opt_prof_recent_alloc_max, ATOMIC_RELAXED);
}
static inline ssize_t
@ -144,26 +144,26 @@ edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata) {
static inline prof_recent_t *
edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_t *recent_alloc =
edata_prof_recent_alloc_get_no_lock(edata);
assert(recent_alloc == NULL ||
prof_recent_alloc_edata_get(tsd, recent_alloc) == edata);
prof_recent_t *recent_alloc = edata_prof_recent_alloc_get_no_lock(
edata);
assert(recent_alloc == NULL
|| prof_recent_alloc_edata_get(tsd, recent_alloc) == edata);
return recent_alloc;
}
static prof_recent_t *
edata_prof_recent_alloc_update_internal(tsd_t *tsd, edata_t *edata,
prof_recent_t *recent_alloc) {
edata_prof_recent_alloc_update_internal(
tsd_t *tsd, edata_t *edata, prof_recent_t *recent_alloc) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_t *old_recent_alloc =
edata_prof_recent_alloc_get(tsd, edata);
prof_recent_t *old_recent_alloc = edata_prof_recent_alloc_get(
tsd, edata);
edata_prof_recent_alloc_set_dont_call_directly(edata, recent_alloc);
return old_recent_alloc;
}
static void
edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata,
prof_recent_t *recent_alloc) {
edata_prof_recent_alloc_set(
tsd_t *tsd, edata_t *edata, prof_recent_t *recent_alloc) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert(recent_alloc != NULL);
prof_recent_t *old_recent_alloc =
@ -173,8 +173,8 @@ edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata,
}
static void
edata_prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata,
prof_recent_t *recent_alloc) {
edata_prof_recent_alloc_reset(
tsd_t *tsd, edata_t *edata, prof_recent_t *recent_alloc) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
assert(recent_alloc != NULL);
prof_recent_t *old_recent_alloc =
@ -265,14 +265,14 @@ prof_recent_alloc_assert_count(tsd_t *tsd) {
if (!config_debug) {
return;
}
ssize_t count = 0;
ssize_t count = 0;
prof_recent_t *n;
ql_foreach(n, &prof_recent_alloc_list, link) {
ql_foreach (n, &prof_recent_alloc_list, link) {
++count;
}
assert(count == prof_recent_alloc_count);
assert(prof_recent_alloc_max_get(tsd) == -1 ||
count <= prof_recent_alloc_max_get(tsd));
assert(prof_recent_alloc_max_get(tsd) == -1
|| count <= prof_recent_alloc_max_get(tsd));
}
void
@ -319,8 +319,8 @@ prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) {
* the allocation locks.
*/
prof_recent_t *reserve = NULL;
if (prof_recent_alloc_max_get(tsd) == -1 ||
prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) {
if (prof_recent_alloc_max_get(tsd) == -1
|| prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) {
assert(prof_recent_alloc_max_get(tsd) != 0);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
reserve = prof_recent_allocate_node(tsd_tsdn(tsd));
@ -346,8 +346,9 @@ prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) {
ql_rotate(&prof_recent_alloc_list, link);
} else {
/* Otherwise make use of the new node. */
assert(prof_recent_alloc_max_get(tsd) == -1 ||
prof_recent_alloc_count < prof_recent_alloc_max_get(tsd));
assert(prof_recent_alloc_max_get(tsd) == -1
|| prof_recent_alloc_count
< prof_recent_alloc_max_get(tsd));
if (reserve == NULL) {
goto label_rollback;
}
@ -421,7 +422,7 @@ prof_recent_alloc_restore_locked(tsd_t *tsd, prof_recent_list_t *to_delete) {
}
prof_recent_t *node;
ql_foreach(node, &prof_recent_alloc_list, link) {
ql_foreach (node, &prof_recent_alloc_list, link) {
if (prof_recent_alloc_count == max) {
break;
}
@ -462,7 +463,7 @@ prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) {
assert(max >= -1);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_alloc_assert_count(tsd);
const ssize_t old_max = prof_recent_alloc_max_update(tsd, max);
const ssize_t old_max = prof_recent_alloc_max_update(tsd, max);
prof_recent_list_t to_delete;
prof_recent_alloc_restore_locked(tsd, &to_delete);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
@ -472,7 +473,7 @@ prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) {
static void
prof_recent_alloc_dump_bt(emitter_t *emitter, prof_tctx_t *tctx) {
char bt_buf[2 * sizeof(intptr_t) + 3];
char bt_buf[2 * sizeof(intptr_t) + 3];
char *s = bt_buf;
assert(tctx != NULL);
prof_bt_t *bt = &tctx->gctx->bt;
@ -501,8 +502,8 @@ prof_recent_alloc_dump_node(emitter_t *emitter, prof_recent_t *node) {
emitter_type_string, &thread_name);
}
uint64_t alloc_time_ns = nstime_ns(&node->alloc_time);
emitter_json_kv(emitter, "alloc_time", emitter_type_uint64,
&alloc_time_ns);
emitter_json_kv(
emitter, "alloc_time", emitter_type_uint64, &alloc_time_ns);
emitter_json_array_kv_begin(emitter, "alloc_trace");
prof_recent_alloc_dump_bt(emitter, node->alloc_tctx);
emitter_json_array_end(emitter);
@ -539,8 +540,8 @@ prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
buf_writer_init(tsd_tsdn(tsd), &buf_writer, write_cb, cbopaque, NULL,
PROF_RECENT_PRINT_BUFSIZE);
emitter_t emitter;
emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
&buf_writer);
emitter_init(
&emitter, emitter_output_json_compact, buf_writer_cb, &buf_writer);
prof_recent_list_t temp_list;
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
@ -554,13 +555,13 @@ prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
emitter_begin(&emitter);
uint64_t sample_interval = (uint64_t)1U << lg_prof_sample;
emitter_json_kv(&emitter, "sample_interval", emitter_type_uint64,
&sample_interval);
emitter_json_kv(&emitter, "recent_alloc_max", emitter_type_ssize,
&dump_max);
emitter_json_kv(
&emitter, "sample_interval", emitter_type_uint64, &sample_interval);
emitter_json_kv(
&emitter, "recent_alloc_max", emitter_type_ssize, &dump_max);
emitter_json_array_kv_begin(&emitter, "recent_alloc");
prof_recent_t *node;
ql_foreach(node, &temp_list, link) {
ql_foreach (node, &temp_list, link) {
prof_recent_alloc_dump_node(&emitter, node);
}
emitter_json_array_end(&emitter);
@ -587,12 +588,12 @@ prof_recent_init(void) {
prof_recent_alloc_max_init();
if (malloc_mutex_init(&prof_recent_alloc_mtx, "prof_recent_alloc",
WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_recent_dump_mtx, "prof_recent_dump",
WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) {
return true;
}

View file

@ -6,12 +6,12 @@
#if defined(__linux__) && defined(JEMALLOC_HAVE_GETTID)
# include <errno.h>
# include <fcntl.h>
# include <stdio.h>
# include <stdlib.h> // strtoul
# include <string.h>
# include <unistd.h>
# include <errno.h>
# include <fcntl.h>
# include <stdio.h>
# include <stdlib.h> // strtoul
# include <string.h>
# include <unistd.h>
/*
* Converts a string representing a hexadecimal number to an unsigned long long
@ -25,31 +25,31 @@
*/
static inline unsigned long long int
strtoull_hex(const char *nptr, char **endptr) {
unsigned long long int val = 0;
int ii = 0;
for (; ii < 16; ++ii) {
char c = nptr[ii];
if (c >= '0' && c <= '9') {
val = (val << 4) + (c - '0');
} else if (c >= 'a' && c <= 'f') {
val = (val << 4) + (c - 'a' + 10);
} else {
break;
}
}
if (endptr) {
*endptr = (char *)(nptr + ii);
}
return val;
unsigned long long int val = 0;
int ii = 0;
for (; ii < 16; ++ii) {
char c = nptr[ii];
if (c >= '0' && c <= '9') {
val = (val << 4) + (c - '0');
} else if (c >= 'a' && c <= 'f') {
val = (val << 4) + (c - 'a' + 10);
} else {
break;
}
}
if (endptr) {
*endptr = (char *)(nptr + ii);
}
return val;
}
static int
prof_mapping_containing_addr(uintptr_t addr, const char *maps_path,
uintptr_t *mm_start, uintptr_t *mm_end) {
int ret = ENOENT; /* not found */
*mm_start = *mm_end = 0;
uintptr_t *mm_start, uintptr_t *mm_end) {
int ret = ENOENT; /* not found */
*mm_start = *mm_end = 0;
/*
/*
* Each line of /proc/<pid>/maps is:
* <start>-<end> <perms> <offset> <dev> <inode> <pathname>
*
@ -57,90 +57,93 @@ prof_mapping_containing_addr(uintptr_t addr, const char *maps_path,
* as long as `buf` contains the start of a mapping line it can always be
* parsed.
*/
static const int kMappingFieldsWidth = 34;
static const int kMappingFieldsWidth = 34;
int fd = -1;
char buf[4096];
ssize_t remaining = 0; /* actual number of bytes read to buf */
char *line = NULL;
int fd = -1;
char buf[4096];
ssize_t remaining = 0; /* actual number of bytes read to buf */
char *line = NULL;
while (1) {
if (fd < 0) {
/* case 0: initial open of maps file */
fd = malloc_open(maps_path, O_RDONLY);
if (fd < 0) {
return errno;
}
while (1) {
if (fd < 0) {
/* case 0: initial open of maps file */
fd = malloc_open(maps_path, O_RDONLY);
if (fd < 0) {
return errno;
}
remaining = malloc_read_fd(fd, buf, sizeof(buf));
if (remaining <= 0) {
ret = errno;
break;
}
line = buf;
} else if (line == NULL) {
/* case 1: no newline found in buf */
remaining = malloc_read_fd(fd, buf, sizeof(buf));
if (remaining <= 0) {
ret = errno;
break;
}
line = memchr(buf, '\n', remaining);
if (line != NULL) {
line++; /* advance to character after newline */
remaining -= (line - buf);
}
} else if (line != NULL && remaining < kMappingFieldsWidth) {
/*
remaining = malloc_read_fd(fd, buf, sizeof(buf));
if (remaining <= 0) {
ret = errno;
break;
}
line = buf;
} else if (line == NULL) {
/* case 1: no newline found in buf */
remaining = malloc_read_fd(fd, buf, sizeof(buf));
if (remaining <= 0) {
ret = errno;
break;
}
line = memchr(buf, '\n', remaining);
if (line != NULL) {
line++; /* advance to character after newline */
remaining -= (line - buf);
}
} else if (line != NULL && remaining < kMappingFieldsWidth) {
/*
* case 2: found newline but insufficient characters remaining in
* buf
*/
memcpy(buf, line,
remaining); /* copy remaining characters to start of buf */
line = buf;
memcpy(buf, line,
remaining); /* copy remaining characters to start of buf */
line = buf;
size_t count =
malloc_read_fd(fd, buf + remaining, sizeof(buf) - remaining);
if (count <= 0) {
ret = errno;
break;
}
size_t count = malloc_read_fd(
fd, buf + remaining, sizeof(buf) - remaining);
if (count <= 0) {
ret = errno;
break;
}
remaining += count; /* actual number of bytes read to buf */
} else {
/* case 3: found newline and sufficient characters to parse */
remaining +=
count; /* actual number of bytes read to buf */
} else {
/* case 3: found newline and sufficient characters to parse */
/* parse <start>-<end> */
char *tmp = line;
uintptr_t start_addr = (uintptr_t)strtoull_hex(tmp, &tmp);
if (addr >= start_addr) {
tmp++; /* advance to character after '-' */
uintptr_t end_addr = (uintptr_t)strtoull_hex(tmp, NULL);
if (addr < end_addr) {
*mm_start = start_addr;
*mm_end = end_addr;
ret = 0;
break;
}
}
/* parse <start>-<end> */
char *tmp = line;
uintptr_t start_addr = (uintptr_t)strtoull_hex(
tmp, &tmp);
if (addr >= start_addr) {
tmp++; /* advance to character after '-' */
uintptr_t end_addr = (uintptr_t)strtoull_hex(
tmp, NULL);
if (addr < end_addr) {
*mm_start = start_addr;
*mm_end = end_addr;
ret = 0;
break;
}
}
/* Advance to character after next newline in the current buf. */
char *prev_line = line;
line = memchr(line, '\n', remaining);
if (line != NULL) {
line++; /* advance to character after newline */
remaining -= (line - prev_line);
}
}
}
/* Advance to character after next newline in the current buf. */
char *prev_line = line;
line = memchr(line, '\n', remaining);
if (line != NULL) {
line++; /* advance to character after newline */
remaining -= (line - prev_line);
}
}
}
malloc_close(fd);
return ret;
malloc_close(fd);
return ret;
}
int
prof_thread_stack_range(uintptr_t fp, uintptr_t *low, uintptr_t *high) {
/*
/*
* NOTE: Prior to kernel 4.5 an entry for every thread stack was included in
* /proc/<pid>/maps as [STACK:<tid>]. Starting with kernel 4.5 only the main
* thread stack remains as the [stack] mapping. For other thread stacks the
@ -148,19 +151,19 @@ prof_thread_stack_range(uintptr_t fp, uintptr_t *low, uintptr_t *high) {
* labeled as [STACK:tid]).
* https://lists.ubuntu.com/archives/kernel-team/2016-March/074681.html
*/
char maps_path[64]; // "/proc/<pid>/task/<tid>/maps"
malloc_snprintf(maps_path, sizeof(maps_path), "/proc/%d/task/%d/maps",
getpid(), gettid());
return prof_mapping_containing_addr(fp, maps_path, low, high);
char maps_path[64]; // "/proc/<pid>/task/<tid>/maps"
malloc_snprintf(maps_path, sizeof(maps_path), "/proc/%d/task/%d/maps",
getpid(), gettid());
return prof_mapping_containing_addr(fp, maps_path, low, high);
}
#else
int
prof_thread_stack_range(
UNUSED uintptr_t addr, uintptr_t *stack_start, uintptr_t *stack_end) {
*stack_start = *stack_end = 0;
return ENOENT;
UNUSED uintptr_t addr, uintptr_t *stack_start, uintptr_t *stack_end) {
*stack_start = *stack_end = 0;
return ENOENT;
}
#endif // __linux__
#endif // __linux__

View file

@ -3,8 +3,8 @@
#include "jemalloc/internal/prof_stats.h"
bool opt_prof_stats = false;
malloc_mutex_t prof_stats_mtx;
bool opt_prof_stats = false;
malloc_mutex_t prof_stats_mtx;
static prof_stats_t prof_stats_live[PROF_SC_NSIZES];
static prof_stats_t prof_stats_accum[PROF_SC_NSIZES];

View file

@ -8,8 +8,8 @@
#include "jemalloc/internal/prof_sys.h"
#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
# define UNW_LOCAL_ONLY
# include <libunwind.h>
#endif
#ifdef JEMALLOC_PROF_LIBGCC
@ -18,14 +18,15 @@
* use libgcc's unwinding functionality, but after we've included that, we've
* already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
*/
#undef _Unwind_Backtrace
#include <unwind.h>
#define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
# undef _Unwind_Backtrace
# include <unwind.h>
# define _Unwind_Backtrace \
JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
#endif
#ifdef JEMALLOC_PROF_FRAME_POINTER
// execinfo backtrace() as fallback unwinder
#include <execinfo.h>
# include <execinfo.h>
#endif
/******************************************************************************/
@ -77,7 +78,7 @@ prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
void *ip;
void *ip;
cassert(config_prof);
@ -115,14 +116,15 @@ struct stack_range {
struct thread_unwind_info {
struct stack_range stack_range;
bool fallback;
bool fallback;
};
static __thread struct thread_unwind_info unwind_info = {
.stack_range = {
.start = 0,
.end = 0,
},
.fallback = false,
.stack_range =
{
.start = 0,
.end = 0,
},
.fallback = false,
}; /* thread local */
static void
@ -142,10 +144,11 @@ prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
uintptr_t fp = (uintptr_t)__builtin_frame_address(0);
/* new thread - get the stack range */
if (!unwind_info.fallback &&
unwind_info.stack_range.start == unwind_info.stack_range.end) {
if (!unwind_info.fallback
&& unwind_info.stack_range.start == unwind_info.stack_range.end) {
if (prof_thread_stack_range(fp, &unwind_info.stack_range.start,
&unwind_info.stack_range.end) != 0) {
&unwind_info.stack_range.end)
!= 0) {
unwind_info.fallback = true;
} else {
assert(fp >= unwind_info.stack_range.start
@ -159,8 +162,8 @@ prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
unsigned ii = 0;
while (ii < max_len && fp != 0) {
if (fp < unwind_info.stack_range.start ||
fp >= unwind_info.stack_range.end) {
if (fp < unwind_info.stack_range.start
|| fp >= unwind_info.stack_range.end) {
/*
* Determining the stack range from procfs can be
* relatively expensive especially for programs with
@ -173,7 +176,7 @@ prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
unwind_info.fallback = true;
goto label_fallback;
}
void* ip = ((void **)fp)[1];
void *ip = ((void **)fp)[1];
if (ip == 0) {
break;
}
@ -205,21 +208,21 @@ JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS
static void
prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
/* The input arg must be a constant for __builtin_return_address. */
#define BT_FRAME(i) \
if ((i) < max_len) { \
void *p; \
if (__builtin_frame_address(i) == 0) { \
return; \
} \
p = __builtin_return_address(i); \
if (p == NULL) { \
return; \
} \
vec[(i)] = p; \
*len = (i) + 1; \
} else { \
return; \
}
# define BT_FRAME(i) \
if ((i) < max_len) { \
void *p; \
if (__builtin_frame_address(i) == 0) { \
return; \
} \
p = __builtin_return_address(i); \
if (p == NULL) { \
return; \
} \
vec[(i)] = p; \
*len = (i) + 1; \
} else { \
return; \
}
cassert(config_prof);
assert(vec != NULL);
@ -506,8 +509,8 @@ prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
BT_FRAME(253)
BT_FRAME(254)
BT_FRAME(255)
#undef BT_FRAME
JEMALLOC_DIAGNOSTIC_POP
# undef BT_FRAME
JEMALLOC_DIAGNOSTIC_POP
}
#else
static void
@ -568,8 +571,9 @@ prof_sys_thread_name_fetch(tsd_t *tsd) {
return;
}
if (prof_sys_thread_name_read(tdata->thread_name,
PROF_THREAD_NAME_MAX_LEN) != 0) {
if (prof_sys_thread_name_read(
tdata->thread_name, PROF_THREAD_NAME_MAX_LEN)
!= 0) {
prof_thread_name_clear(tdata);
}
@ -592,32 +596,32 @@ prof_get_pid_namespace(void) {
#if defined(_WIN32) || defined(__APPLE__)
// Not supported, do nothing.
#else
char buf[PATH_MAX];
const char* linkname =
# if defined(__FreeBSD__) || defined(__DragonFly__)
char buf[PATH_MAX];
const char *linkname =
# if defined(__FreeBSD__) || defined(__DragonFly__)
"/proc/curproc/ns/pid"
# else
# else
"/proc/self/ns/pid"
# endif
# endif
;
ssize_t linklen =
# ifndef JEMALLOC_READLINKAT
readlink(linkname, buf, PATH_MAX)
# else
readlinkat(AT_FDCWD, linkname, buf, PATH_MAX)
# endif
# ifndef JEMALLOC_READLINKAT
readlink(linkname, buf, PATH_MAX)
# else
readlinkat(AT_FDCWD, linkname, buf, PATH_MAX)
# endif
;
// namespace string is expected to be like pid:[4026531836]
if (linklen > 0) {
// Trim the trailing "]"
buf[linklen-1] = '\0';
char* index = strtok(buf, "pid:[");
buf[linklen - 1] = '\0';
char *index = strtok(buf, "pid:[");
ret = atol(index);
}
#endif
return ret;
return ret;
}
/*
@ -647,8 +651,8 @@ struct prof_dump_arg_s {
};
static void
prof_dump_check_possible_error(prof_dump_arg_t *arg, bool err_cond,
const char *format, ...) {
prof_dump_check_possible_error(
prof_dump_arg_t *arg, bool err_cond, const char *format, ...) {
assert(!arg->error);
if (!err_cond) {
return;
@ -660,7 +664,7 @@ prof_dump_check_possible_error(prof_dump_arg_t *arg, bool err_cond,
}
va_list ap;
char buf[PROF_PRINTF_BUFSIZE];
char buf[PROF_PRINTF_BUFSIZE];
va_start(ap, format);
malloc_vsnprintf(buf, sizeof(buf), format, ap);
va_end(ap);
@ -692,8 +696,8 @@ prof_dump_flush(void *opaque, const char *s) {
cassert(config_prof);
prof_dump_arg_t *arg = (prof_dump_arg_t *)opaque;
if (!arg->error) {
ssize_t err = prof_dump_write_file(arg->prof_dump_fd, s,
strlen(s));
ssize_t err = prof_dump_write_file(
arg->prof_dump_fd, s, strlen(s));
prof_dump_check_possible_error(arg, err == -1,
"<jemalloc>: failed to write during heap profile flush\n");
}
@ -707,36 +711,37 @@ prof_dump_close(prof_dump_arg_t *arg) {
}
#ifdef __APPLE__
#include <mach-o/dyld.h>
# include <mach-o/dyld.h>
#ifdef __LP64__
typedef struct mach_header_64 mach_header_t;
# ifdef __LP64__
typedef struct mach_header_64 mach_header_t;
typedef struct segment_command_64 segment_command_t;
#define MH_MAGIC_VALUE MH_MAGIC_64
#define MH_CIGAM_VALUE MH_CIGAM_64
#define LC_SEGMENT_VALUE LC_SEGMENT_64
#else
typedef struct mach_header mach_header_t;
# define MH_MAGIC_VALUE MH_MAGIC_64
# define MH_CIGAM_VALUE MH_CIGAM_64
# define LC_SEGMENT_VALUE LC_SEGMENT_64
# else
typedef struct mach_header mach_header_t;
typedef struct segment_command segment_command_t;
#define MH_MAGIC_VALUE MH_MAGIC
#define MH_CIGAM_VALUE MH_CIGAM
#define LC_SEGMENT_VALUE LC_SEGMENT
#endif
# define MH_MAGIC_VALUE MH_MAGIC
# define MH_CIGAM_VALUE MH_CIGAM
# define LC_SEGMENT_VALUE LC_SEGMENT
# endif
static void
prof_dump_dyld_image_vmaddr(buf_writer_t *buf_writer, uint32_t image_index) {
const mach_header_t *header = (const mach_header_t *)
_dyld_get_image_header(image_index);
if (header == NULL || (header->magic != MH_MAGIC_VALUE &&
header->magic != MH_CIGAM_VALUE)) {
if (header == NULL
|| (header->magic != MH_MAGIC_VALUE
&& header->magic != MH_CIGAM_VALUE)) {
// Invalid header
return;
}
intptr_t slide = _dyld_get_image_vmaddr_slide(image_index);
const char *name = _dyld_get_image_name(image_index);
struct load_command *load_cmd = (struct load_command *)
((char *)header + sizeof(mach_header_t));
intptr_t slide = _dyld_get_image_vmaddr_slide(image_index);
const char *name = _dyld_get_image_name(image_index);
struct load_command *load_cmd = (struct load_command *)((char *)header
+ sizeof(mach_header_t));
for (uint32_t i = 0; load_cmd && (i < header->ncmds); i++) {
if (load_cmd->cmd == LC_SEGMENT_VALUE) {
const segment_command_t *segment_cmd =
@ -744,14 +749,17 @@ prof_dump_dyld_image_vmaddr(buf_writer_t *buf_writer, uint32_t image_index) {
if (!strcmp(segment_cmd->segname, "__TEXT")) {
char buffer[PATH_MAX + 1];
malloc_snprintf(buffer, sizeof(buffer),
"%016llx-%016llx: %s\n", segment_cmd->vmaddr + slide,
segment_cmd->vmaddr + slide + segment_cmd->vmsize, name);
"%016llx-%016llx: %s\n",
segment_cmd->vmaddr + slide,
segment_cmd->vmaddr + slide
+ segment_cmd->vmsize,
name);
buf_writer_cb(buf_writer, buffer);
return;
}
}
load_cmd =
(struct load_command *)((char *)load_cmd + load_cmd->cmdsize);
load_cmd = (struct load_command *)((char *)load_cmd
+ load_cmd->cmdsize);
}
}
@ -772,48 +780,48 @@ prof_dump_maps(buf_writer_t *buf_writer) {
prof_dump_dyld_maps(buf_writer);
}
#else /* !__APPLE__ */
#ifndef _WIN32
# ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
prof_open_maps_internal(const char *format, ...) {
int mfd;
int mfd;
va_list ap;
char filename[PATH_MAX + 1];
char filename[PATH_MAX + 1];
va_start(ap, format);
malloc_vsnprintf(filename, sizeof(filename), format, ap);
va_end(ap);
#if defined(O_CLOEXEC)
# if defined(O_CLOEXEC)
mfd = open(filename, O_RDONLY | O_CLOEXEC);
#else
# else
mfd = open(filename, O_RDONLY);
if (mfd != -1) {
fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
}
#endif
# endif
return mfd;
}
#endif
# endif
static int
prof_dump_open_maps_impl(void) {
int mfd;
cassert(config_prof);
#if defined(__FreeBSD__) || defined(__DragonFly__)
# if defined(__FreeBSD__) || defined(__DragonFly__)
mfd = prof_open_maps_internal("/proc/curproc/map");
#elif defined(_WIN32)
# elif defined(_WIN32)
mfd = -1; // Not implemented
#else
# else
int pid = prof_getpid();
mfd = prof_open_maps_internal("/proc/%d/task/%d/maps", pid, pid);
if (mfd == -1) {
mfd = prof_open_maps_internal("/proc/%d/maps", pid);
}
#endif
# endif
return mfd;
}
prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps =
@ -840,12 +848,12 @@ prof_dump_maps(buf_writer_t *buf_writer) {
#endif /* __APPLE__ */
static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
bool leakcheck) {
prof_dump(
tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) {
cassert(config_prof);
assert(tsd_reentrancy_level_get(tsd) == 0);
prof_tdata_t * tdata = prof_tdata_get(tsd, true);
prof_tdata_t *tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return true;
}
@ -892,7 +900,7 @@ prof_strncpy(char *UNUSED dest, const char *UNUSED src, size_t UNUSED size) {
}
static const char *
prof_prefix_get(tsdn_t* tsdn) {
prof_prefix_get(tsdn_t *tsdn) {
malloc_mutex_assert_owner(tsdn, &prof_dump_filename_mtx);
return prof_prefix == NULL ? opt_prof_prefix : prof_prefix;
@ -919,25 +927,26 @@ prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) {
if (opt_prof_pid_namespace) {
/* "<prefix>.<pid_namespace>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%ld.%d.%"FMTu64".%c%"FMTu64".heap", prefix,
prof_get_pid_namespace(), prof_getpid(), prof_dump_seq, v,
vseq);
"%s.%ld.%d.%" FMTu64 ".%c%" FMTu64 ".heap", prefix,
prof_get_pid_namespace(), prof_getpid(),
prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c%"FMTu64".heap", prefix, prof_getpid(),
prof_dump_seq, v, vseq);
"%s.%d.%" FMTu64 ".%c%" FMTu64 ".heap", prefix,
prof_getpid(), prof_dump_seq, v, vseq);
}
} else {
if (opt_prof_pid_namespace) {
/* "<prefix>.<pid_namespace>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%ld.%d.%"FMTu64".%c.heap", prefix,
prof_get_pid_namespace(), prof_getpid(), prof_dump_seq, v);
"%s.%ld.%d.%" FMTu64 ".%c.heap", prefix,
prof_get_pid_namespace(), prof_getpid(),
prof_dump_seq, v);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c.heap", prefix, prof_getpid(),
"%s.%d.%" FMTu64 ".%c.heap", prefix, prof_getpid(),
prof_dump_seq, v);
}
}
@ -949,11 +958,12 @@ prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind) {
malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
if (opt_prof_pid_namespace) {
malloc_snprintf(filename, PROF_DUMP_FILENAME_LEN,
"%s.%ld.%d.%"FMTu64".json", prof_prefix_get(tsdn),
"%s.%ld.%d.%" FMTu64 ".json", prof_prefix_get(tsdn),
prof_get_pid_namespace(), prof_getpid(), ind);
} else {
malloc_snprintf(filename, PROF_DUMP_FILENAME_LEN,
"%s.%d.%"FMTu64".json", prof_prefix_get(tsdn), prof_getpid(), ind);
"%s.%d.%" FMTu64 ".json", prof_prefix_get(tsdn),
prof_getpid(), ind);
}
malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
}
@ -980,8 +990,8 @@ prof_prefix_set(tsdn_t *tsdn, const char *prefix) {
if (prof_prefix == NULL) {
malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
/* Everything is still guarded by ctl_mtx. */
char *buffer = base_alloc(tsdn, prof_base,
PROF_DUMP_FILENAME_LEN, QUANTUM);
char *buffer = base_alloc(
tsdn, prof_base, PROF_DUMP_FILENAME_LEN, QUANTUM);
if (buffer == NULL) {
return true;
}
@ -1018,7 +1028,8 @@ prof_mdump_impl(tsd_t *tsd, const char *filename) {
/* No filename specified, so automatically generate one. */
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
malloc_mutex_unlock(
tsd_tsdn(tsd), &prof_dump_filename_mtx);
return true;
}
prof_dump_filename(tsd, filename_buf, 'm', prof_dump_mseq);

View file

@ -22,8 +22,8 @@ prof_threshold_hook_set(prof_threshold_hook_t hook) {
prof_threshold_hook_t
prof_threshold_hook_get(void) {
return (prof_threshold_hook_t)atomic_load_p(&prof_threshold_hook,
ATOMIC_ACQUIRE);
return (prof_threshold_hook_t)atomic_load_p(
&prof_threshold_hook, ATOMIC_ACQUIRE);
}
/* Invoke callback for threshold reached */
@ -32,10 +32,10 @@ prof_threshold_update(tsd_t *tsd) {
prof_threshold_hook_t prof_threshold_hook = prof_threshold_hook_get();
if (prof_threshold_hook == NULL) {
return;
}
}
uint64_t alloc = tsd_thread_allocated_get(tsd);
uint64_t dalloc = tsd_thread_deallocated_get(tsd);
peak_t *peak = tsd_peakp_get(tsd);
peak_t *peak = tsd_peakp_get(tsd);
pre_reentrancy(tsd, NULL);
prof_threshold_hook(alloc, dalloc, peak->cur_max);
post_reentrancy(tsd);
@ -62,8 +62,8 @@ prof_threshold_enabled(void) {
}
te_base_cb_t prof_threshold_te_handler = {
.enabled = &prof_threshold_enabled,
.new_event_wait = &prof_threshold_new_event_wait,
.postponed_event_wait = &prof_threshold_postponed_event_wait,
.event_handler = &prof_threshold_event_handler,
.enabled = &prof_threshold_enabled,
.new_event_wait = &prof_threshold_new_event_wait,
.postponed_event_wait = &prof_threshold_postponed_event_wait,
.event_handler = &prof_threshold_event_handler,
};

View file

@ -32,16 +32,16 @@ psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) {
psset_bin_stats_accum(&dst->merged, &src->merged);
for (int huge = 0; huge < PSSET_NHUGE; huge++) {
psset_bin_stats_accum(&dst->slabs[huge], &src->slabs[huge]);
psset_bin_stats_accum(&dst->full_slabs[huge],
&src->full_slabs[huge]);
psset_bin_stats_accum(&dst->empty_slabs[huge],
&src->empty_slabs[huge]);
psset_bin_stats_accum(
&dst->full_slabs[huge], &src->full_slabs[huge]);
psset_bin_stats_accum(
&dst->empty_slabs[huge], &src->empty_slabs[huge]);
}
for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
psset_bin_stats_accum(&dst->nonfull_slabs[i][0],
&src->nonfull_slabs[i][0]);
psset_bin_stats_accum(&dst->nonfull_slabs[i][1],
&src->nonfull_slabs[i][1]);
psset_bin_stats_accum(
&dst->nonfull_slabs[i][0], &src->nonfull_slabs[i][0]);
psset_bin_stats_accum(
&dst->nonfull_slabs[i][1], &src->nonfull_slabs[i][1]);
}
}
@ -83,10 +83,10 @@ psset_slab_stats_insert_remove(psset_stats_t *stats,
if (config_debug) {
psset_bin_stats_t check_stats[PSSET_NHUGE] = {{0}};
for (int huge = 0; huge < PSSET_NHUGE; huge++) {
psset_bin_stats_accum(&check_stats[huge],
&stats->full_slabs[huge]);
psset_bin_stats_accum(&check_stats[huge],
&stats->empty_slabs[huge]);
psset_bin_stats_accum(
&check_stats[huge], &stats->full_slabs[huge]);
psset_bin_stats_accum(
&check_stats[huge], &stats->empty_slabs[huge]);
for (pszind_t pind = 0; pind < PSSET_NPSIZES; pind++) {
psset_bin_stats_accum(&check_stats[huge],
&stats->nonfull_slabs[pind][huge]);
@ -112,14 +112,14 @@ psset_slab_stats_insert_remove(psset_stats_t *stats,
}
static void
psset_slab_stats_insert(psset_stats_t *stats, psset_bin_stats_t *binstats,
hpdata_t *ps) {
psset_slab_stats_insert(
psset_stats_t *stats, psset_bin_stats_t *binstats, hpdata_t *ps) {
psset_slab_stats_insert_remove(stats, binstats, ps, true);
}
static void
psset_slab_stats_remove(psset_stats_t *stats, psset_bin_stats_t *binstats,
hpdata_t *ps) {
psset_slab_stats_remove(
psset_stats_t *stats, psset_bin_stats_t *binstats, hpdata_t *ps) {
psset_slab_stats_insert_remove(stats, binstats, ps, false);
}
@ -127,9 +127,9 @@ static pszind_t
psset_hpdata_heap_index(const hpdata_t *ps) {
assert(!hpdata_full(ps));
assert(!hpdata_empty(ps));
size_t longest_free_range = hpdata_longest_free_range_get(ps);
pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
longest_free_range << LG_PAGE));
size_t longest_free_range = hpdata_longest_free_range_get(ps);
pszind_t pind = sz_psz2ind(
sz_psz_quantize_floor(longest_free_range << LG_PAGE));
assert(pind < PSSET_NPSIZES);
return pind;
}
@ -161,8 +161,8 @@ psset_stats_insert(psset_t *psset, hpdata_t *ps) {
psset_slab_stats_insert(stats, psset->stats.full_slabs, ps);
} else {
pszind_t pind = psset_hpdata_heap_index(ps);
psset_slab_stats_insert(stats, psset->stats.nonfull_slabs[pind],
ps);
psset_slab_stats_insert(
stats, psset->stats.nonfull_slabs[pind], ps);
}
}
@ -175,8 +175,8 @@ psset_stats_remove(psset_t *psset, hpdata_t *ps) {
psset_slab_stats_remove(stats, psset->stats.full_slabs, ps);
} else {
pszind_t pind = psset_hpdata_heap_index(ps);
psset_slab_stats_remove(stats, psset->stats.nonfull_slabs[pind],
ps);
psset_slab_stats_remove(
stats, psset->stats.nonfull_slabs[pind], ps);
}
}
@ -264,7 +264,7 @@ psset_maybe_remove_purge_list(psset_t *psset, hpdata_t *ps) {
* purge LRU within a given dirtiness bucket.
*/
if (hpdata_purge_allowed_get(ps)) {
size_t ind = psset_purge_list_ind(ps);
size_t ind = psset_purge_list_ind(ps);
hpdata_purge_list_t *purge_list = &psset->to_purge[ind];
hpdata_purge_list_remove(purge_list, ps);
if (hpdata_purge_list_empty(purge_list)) {
@ -276,14 +276,13 @@ psset_maybe_remove_purge_list(psset_t *psset, hpdata_t *ps) {
static void
psset_maybe_insert_purge_list(psset_t *psset, hpdata_t *ps) {
if (hpdata_purge_allowed_get(ps)) {
size_t ind = psset_purge_list_ind(ps);
size_t ind = psset_purge_list_ind(ps);
hpdata_purge_list_t *purge_list = &psset->to_purge[ind];
if (hpdata_purge_list_empty(purge_list)) {
fb_set(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind);
}
hpdata_purge_list_append(purge_list, ps);
}
}
void
@ -343,13 +342,13 @@ psset_enumerate_search(psset_t *psset, pszind_t pind, size_t size) {
return NULL;
}
hpdata_t *ps = NULL;
hpdata_t *ps = NULL;
hpdata_age_heap_enumerate_helper_t helper;
hpdata_age_heap_enumerate_prepare(&psset->pageslabs[pind], &helper,
PSSET_ENUMERATE_MAX_NUM, sizeof(helper.bfs_queue) / sizeof(void *));
while ((ps = hpdata_age_heap_enumerate_next(&psset->pageslabs[pind],
&helper))) {
while ((ps = hpdata_age_heap_enumerate_next(
&psset->pageslabs[pind], &helper))) {
if (hpdata_longest_free_range_get(ps) >= size) {
return ps;
}
@ -363,7 +362,7 @@ psset_pick_alloc(psset_t *psset, size_t size) {
assert((size & PAGE_MASK) == 0);
assert(size <= HUGEPAGE);
pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size));
pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size));
hpdata_t *ps = NULL;
/* See comments in eset_first_fit for why we enumerate search below. */
@ -375,8 +374,8 @@ psset_pick_alloc(psset_t *psset, size_t size) {
}
}
pszind_t pind = (pszind_t)fb_ffs(psset->pageslab_bitmap, PSSET_NPSIZES,
(size_t)min_pind);
pszind_t pind = (pszind_t)fb_ffs(
psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)min_pind);
if (pind == PSSET_NPSIZES) {
return hpdata_empty_list_first(&psset->empty);
}
@ -392,8 +391,8 @@ psset_pick_alloc(psset_t *psset, size_t size) {
hpdata_t *
psset_pick_purge(psset_t *psset) {
ssize_t ind_ssz = fb_fls(psset->purge_bitmap, PSSET_NPURGE_LISTS,
PSSET_NPURGE_LISTS - 1);
ssize_t ind_ssz = fb_fls(
psset->purge_bitmap, PSSET_NPURGE_LISTS, PSSET_NPURGE_LISTS - 1);
if (ind_ssz < 0) {
return NULL;
}

View file

@ -20,7 +20,7 @@ rtree_new(rtree_t *rtree, base_t *base, bool zeroed) {
rtree->base = base;
if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
malloc_mutex_rank_exclusive)) {
malloc_mutex_rank_exclusive)) {
return true;
}
@ -29,19 +29,19 @@ rtree_new(rtree_t *rtree, base_t *base, bool zeroed) {
static rtree_node_elm_t *
rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
return (rtree_node_elm_t *)base_alloc_rtree(tsdn, rtree->base,
nelms * sizeof(rtree_node_elm_t));
return (rtree_node_elm_t *)base_alloc_rtree(
tsdn, rtree->base, nelms * sizeof(rtree_node_elm_t));
}
static rtree_leaf_elm_t *
rtree_leaf_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
return (rtree_leaf_elm_t *)base_alloc_rtree(tsdn, rtree->base,
nelms * sizeof(rtree_leaf_elm_t));
return (rtree_leaf_elm_t *)base_alloc_rtree(
tsdn, rtree->base, nelms * sizeof(rtree_leaf_elm_t));
}
static rtree_node_elm_t *
rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
atomic_p_t *elmp) {
rtree_node_init(
tsdn_t *tsdn, rtree_t *rtree, unsigned level, atomic_p_t *elmp) {
malloc_mutex_lock(tsdn, &rtree->init_lock);
/*
* If *elmp is non-null, then it was initialized with the init lock
@ -49,8 +49,8 @@ rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
*/
rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
if (node == NULL) {
node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
rtree_levels[level].bits);
node = rtree_node_alloc(
tsdn, rtree, ZU(1) << rtree_levels[level].bits);
if (node == NULL) {
malloc_mutex_unlock(tsdn, &rtree->init_lock);
return NULL;
@ -75,8 +75,8 @@ rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
*/
rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
if (leaf == NULL) {
leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
rtree_levels[RTREE_HEIGHT-1].bits);
leaf = rtree_leaf_alloc(
tsdn, rtree, ZU(1) << rtree_levels[RTREE_HEIGHT - 1].bits);
if (leaf == NULL) {
malloc_mutex_unlock(tsdn, &rtree->init_lock);
return NULL;
@ -107,11 +107,11 @@ rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
rtree_node_elm_t *node;
if (dependent) {
node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
ATOMIC_RELAXED);
node = (rtree_node_elm_t *)atomic_load_p(
&elm->child, ATOMIC_RELAXED);
} else {
node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
ATOMIC_ACQUIRE);
node = (rtree_node_elm_t *)atomic_load_p(
&elm->child, ATOMIC_ACQUIRE);
}
assert(!dependent || node != NULL);
@ -136,11 +136,11 @@ rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
rtree_leaf_elm_t *leaf;
if (dependent) {
leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
ATOMIC_RELAXED);
leaf = (rtree_leaf_elm_t *)atomic_load_p(
&elm->child, ATOMIC_RELAXED);
} else {
leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
ATOMIC_ACQUIRE);
leaf = (rtree_leaf_elm_t *)atomic_load_p(
&elm->child, ATOMIC_ACQUIRE);
}
assert(!dependent || leaf != NULL);
@ -181,53 +181,54 @@ rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
}
}
#define RTREE_GET_CHILD(level) { \
assert(level < RTREE_HEIGHT-1); \
if (level != 0 && !dependent && \
unlikely(!rtree_node_valid(node))) { \
return NULL; \
} \
uintptr_t subkey = rtree_subkey(key, level); \
if (level + 2 < RTREE_HEIGHT) { \
node = init_missing ? \
rtree_child_node_read(tsdn, rtree, \
&node[subkey], level, dependent) : \
rtree_child_node_tryread(&node[subkey], \
dependent); \
} else { \
leaf = init_missing ? \
rtree_child_leaf_read(tsdn, rtree, \
&node[subkey], level, dependent) : \
rtree_child_leaf_tryread(&node[subkey], \
dependent); \
} \
#define RTREE_GET_CHILD(level) \
{ \
assert(level < RTREE_HEIGHT - 1); \
if (level != 0 && !dependent \
&& unlikely(!rtree_node_valid(node))) { \
return NULL; \
} \
uintptr_t subkey = rtree_subkey(key, level); \
if (level + 2 < RTREE_HEIGHT) { \
node = init_missing \
? rtree_child_node_read(tsdn, rtree, \
&node[subkey], level, dependent) \
: rtree_child_node_tryread( \
&node[subkey], dependent); \
} else { \
leaf = init_missing \
? rtree_child_leaf_read(tsdn, rtree, \
&node[subkey], level, dependent) \
: rtree_child_leaf_tryread( \
&node[subkey], dependent); \
} \
}
/*
* Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss):
* (1) evict last entry in L2 cache; (2) move the collision slot from L1
* cache down to L2; and 3) fill L1.
*/
#define RTREE_GET_LEAF(level) { \
assert(level == RTREE_HEIGHT-1); \
if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \
return NULL; \
} \
if (RTREE_CTX_NCACHE_L2 > 1) { \
memmove(&rtree_ctx->l2_cache[1], \
&rtree_ctx->l2_cache[0], \
sizeof(rtree_ctx_cache_elm_t) * \
(RTREE_CTX_NCACHE_L2 - 1)); \
} \
size_t slot = rtree_cache_direct_map(key); \
rtree_ctx->l2_cache[0].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[0].leaf = \
rtree_ctx->cache[slot].leaf; \
uintptr_t leafkey = rtree_leafkey(key); \
rtree_ctx->cache[slot].leafkey = leafkey; \
rtree_ctx->cache[slot].leaf = leaf; \
uintptr_t subkey = rtree_subkey(key, level); \
return &leaf[subkey]; \
#define RTREE_GET_LEAF(level) \
{ \
assert(level == RTREE_HEIGHT - 1); \
if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \
return NULL; \
} \
if (RTREE_CTX_NCACHE_L2 > 1) { \
memmove(&rtree_ctx->l2_cache[1], \
&rtree_ctx->l2_cache[0], \
sizeof(rtree_ctx_cache_elm_t) \
* (RTREE_CTX_NCACHE_L2 - 1)); \
} \
size_t slot = rtree_cache_direct_map(key); \
rtree_ctx->l2_cache[0].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[0].leaf = rtree_ctx->cache[slot].leaf; \
uintptr_t leafkey = rtree_leafkey(key); \
rtree_ctx->cache[slot].leafkey = leafkey; \
rtree_ctx->cache[slot].leaf = leaf; \
uintptr_t subkey = rtree_subkey(key, level); \
return &leaf[subkey]; \
}
if (RTREE_HEIGHT > 1) {
RTREE_GET_CHILD(0)
@ -236,11 +237,11 @@ rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
RTREE_GET_CHILD(1)
}
if (RTREE_HEIGHT > 3) {
for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
for (unsigned i = 2; i < RTREE_HEIGHT - 1; i++) {
RTREE_GET_CHILD(i)
}
}
RTREE_GET_LEAF(RTREE_HEIGHT-1)
RTREE_GET_LEAF(RTREE_HEIGHT - 1)
#undef RTREE_GET_CHILD
#undef RTREE_GET_LEAF
not_reached();

View file

@ -3,20 +3,24 @@
static safety_check_abort_hook_t safety_check_abort;
void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
void
safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
size_t true_size, size_t input_size) {
char *src = current_dealloc ? "the current pointer being freed" :
"in thread cache, possibly from previous deallocations";
char *src = current_dealloc
? "the current pointer being freed"
: "in thread cache, possibly from previous deallocations";
char *suggest_debug_build = config_debug ? "" : " --enable-debug or";
safety_check_fail("<jemalloc>: size mismatch detected (true size %zu "
safety_check_fail(
"<jemalloc>: size mismatch detected (true size %zu "
"vs input size %zu), likely caused by application sized "
"deallocation bugs (source address: %p, %s). Suggest building with"
"%s address sanitizer for debugging. Abort.\n",
true_size, input_size, ptr, src, suggest_debug_build);
}
void safety_check_set_abort(safety_check_abort_hook_t abort_fn) {
void
safety_check_set_abort(safety_check_abort_hook_t abort_fn) {
safety_check_abort = abort_fn;
}
@ -25,7 +29,8 @@ void safety_check_set_abort(safety_check_abort_hook_t abort_fn) {
* because there are cases only logging crash stack traces.
*/
static void
safety_check_detected_heap_corruption___run_address_sanitizer_build_to_debug(const char *buf) {
safety_check_detected_heap_corruption___run_address_sanitizer_build_to_debug(
const char *buf) {
if (safety_check_abort == NULL) {
malloc_write(buf);
abort();
@ -34,7 +39,8 @@ safety_check_detected_heap_corruption___run_address_sanitizer_build_to_debug(con
}
}
void safety_check_fail(const char *format, ...) {
void
safety_check_fail(const char *format, ...) {
char buf[MALLOC_PRINTF_BUFSIZE];
va_list ap;
@ -42,5 +48,6 @@ void safety_check_fail(const char *format, ...) {
malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap);
va_end(ap);
safety_check_detected_heap_corruption___run_address_sanitizer_build_to_debug(buf);
safety_check_detected_heap_corruption___run_address_sanitizer_build_to_debug(
buf);
}

View file

@ -20,8 +20,8 @@ ssize_t opt_lg_san_uaf_align = SAN_LG_UAF_ALIGN_DEFAULT;
uintptr_t san_cache_bin_nonfast_mask = SAN_CACHE_BIN_NONFAST_MASK_DEFAULT;
static inline void
san_find_guarded_addr(edata_t *edata, void **guard1, void **guard2,
void **addr, size_t size, bool left, bool right) {
san_find_guarded_addr(edata_t *edata, void **guard1, void **guard2, void **addr,
size_t size, bool left, bool right) {
assert(!edata_guarded_get(edata));
assert(size % PAGE == 0);
*addr = edata_base_get(edata);
@ -74,8 +74,8 @@ san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap,
: san_one_side_unguarded_sz(size_with_guards);
void *guard1, *guard2, *addr;
san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left,
right);
san_find_guarded_addr(
edata, &guard1, &guard2, &addr, usize, left, right);
assert(edata_state_get(edata) == extent_state_active);
ehooks_guard(tsdn, ehooks, guard1, guard2);
@ -109,8 +109,8 @@ san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
: san_one_side_guarded_sz(size);
void *guard1, *guard2, *addr;
san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left,
right);
san_find_unguarded_addr(
edata, &guard1, &guard2, &addr, size, left, right);
ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2);
@ -130,15 +130,15 @@ san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
}
void
san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap, bool left, bool right) {
san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap,
bool left, bool right) {
san_unguard_pages_impl(tsdn, ehooks, edata, emap, left, right,
/* remap */ true);
}
void
san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap) {
san_unguard_pages_pre_destroy(
tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) {
emap_assert_not_mapped(tsdn, emap, edata);
/*
* We don't want to touch the emap of about to be destroyed extents, as
@ -146,7 +146,7 @@ san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
* we unguard the extents to the right, because retained extents only
* own their right guard page per san_bump_alloc's logic.
*/
san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false,
san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false,
/* right */ true, /* remap */ false);
}
@ -163,9 +163,9 @@ san_stashed_corrupted(void *ptr, size_t size) {
void *first, *mid, *last;
san_junk_ptr_locations(ptr, size, &first, &mid, &last);
if (*(uintptr_t *)first != uaf_detect_junk ||
*(uintptr_t *)mid != uaf_detect_junk ||
*(uintptr_t *)last != uaf_detect_junk) {
if (*(uintptr_t *)first != uaf_detect_junk
|| *(uintptr_t *)mid != uaf_detect_junk
|| *(uintptr_t *)last != uaf_detect_junk) {
return true;
}
@ -183,7 +183,8 @@ san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize) {
assert(stashed != NULL);
assert(cache_bin_nonfast_aligned(stashed));
if (unlikely(san_stashed_corrupted(stashed, usize))) {
safety_check_fail("<jemalloc>: Write-after-free "
safety_check_fail(
"<jemalloc>: Write-after-free "
"detected on deallocated pointer %p (size %zu).\n",
stashed, usize);
}

View file

@ -7,30 +7,29 @@
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/edata_cache.h"
static bool
san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
ehooks_t *ehooks, size_t size);
static bool san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba,
pac_t *pac, ehooks_t *ehooks, size_t size);
edata_t *
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
ehooks_t *ehooks, size_t size, bool zero) {
assert(san_bump_enabled());
edata_t* to_destroy;
size_t guarded_size = san_one_side_guarded_sz(size);
edata_t *to_destroy;
size_t guarded_size = san_one_side_guarded_sz(size);
malloc_mutex_lock(tsdn, &sba->mtx);
if (sba->curr_reg == NULL ||
edata_size_get(sba->curr_reg) < guarded_size) {
if (sba->curr_reg == NULL
|| edata_size_get(sba->curr_reg) < guarded_size) {
/*
* If the current region can't accommodate the allocation,
* try replacing it with a larger one and destroy current if the
* replacement succeeds.
*/
to_destroy = sba->curr_reg;
bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks,
guarded_size);
bool err = san_bump_grow_locked(
tsdn, sba, pac, ehooks, guarded_size);
if (err) {
goto label_err;
}
@ -40,9 +39,9 @@ san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
assert(guarded_size <= edata_size_get(sba->curr_reg));
size_t trail_size = edata_size_get(sba->curr_reg) - guarded_size;
edata_t* edata;
edata_t *edata;
if (trail_size != 0) {
edata_t* curr_reg_trail = extent_split_wrapper(tsdn, pac,
edata_t *curr_reg_trail = extent_split_wrapper(tsdn, pac,
ehooks, sba->curr_reg, guarded_size, trail_size,
/* holding_core_locks */ true);
if (curr_reg_trail == NULL) {
@ -69,9 +68,8 @@ san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
/* right */ true, /* remap */ true);
if (extent_commit_zero(tsdn, ehooks, edata, /* commit */ true, zero,
/* growing_retained */ false)) {
extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
edata);
/* growing_retained */ false)) {
extent_record(tsdn, pac, ehooks, &pac->ecache_retained, edata);
return NULL;
}
@ -90,9 +88,10 @@ san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
ehooks_t *ehooks, size_t size) {
malloc_mutex_assert_owner(tsdn, &sba->mtx);
bool committed = false, zeroed = false;
size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE ? size :
SBA_RETAINED_ALLOC_SIZE;
bool committed = false, zeroed = false;
size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE
? size
: SBA_RETAINED_ALLOC_SIZE;
assert((alloc_size & PAGE_MASK) == 0);
sba->curr_reg = extent_alloc_wrapper(tsdn, pac, ehooks, NULL,
alloc_size, PAGE, zeroed, &committed,

View file

@ -27,7 +27,7 @@ slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) {
size_t try_slab_size = page;
size_t try_nregs = try_slab_size / reg_size;
size_t perfect_slab_size = 0;
bool perfect = false;
bool perfect = false;
/*
* This loop continues until we find the least common multiple of the
* page size and size class size. Size classes are all of the form
@ -106,7 +106,7 @@ size_classes(
/* Outputs that we update as we go. */
size_t lookup_maxclass = 0;
size_t small_maxclass = 0;
int lg_large_minclass = 0;
int lg_large_minclass = 0;
size_t large_maxclass = 0;
/* Tiny size classes. */
@ -209,7 +209,7 @@ size_classes(
lg_delta++;
}
/* Additional outputs. */
int nsizes = index;
int nsizes = index;
unsigned lg_ceil_nsizes = lg_ceil(nsizes);
/* Fill in the output data. */
@ -292,8 +292,8 @@ sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) {
if (!sc->bin) {
break;
}
size_t reg_size = reg_size_compute(sc->lg_base, sc->lg_delta,
sc->ndelta);
size_t reg_size = reg_size_compute(
sc->lg_base, sc->lg_delta, sc->ndelta);
if (begin <= reg_size && reg_size <= end) {
sc_data_update_sc_slab_size(sc, reg_size, pgs);
}

View file

@ -6,12 +6,12 @@
static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
size_t alignment, bool zero, bool guarded, bool frequent_reuse,
bool *deferred_work_generated);
static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated);
static void sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated);
static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated);
static void sec_dalloc(
tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated);
static void
sec_bin_init(sec_bin_t *bin) {
@ -29,16 +29,16 @@ sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
* USIZE_GROW_SLOW_THRESHOLD because the usize above this increases
* by PAGE and the number of usizes is too large.
*/
assert(!sz_large_size_classes_disabled() ||
opts->max_alloc <= USIZE_GROW_SLOW_THRESHOLD);
assert(!sz_large_size_classes_disabled()
|| opts->max_alloc <= USIZE_GROW_SLOW_THRESHOLD);
size_t max_alloc = PAGE_FLOOR(opts->max_alloc);
size_t max_alloc = PAGE_FLOOR(opts->max_alloc);
pszind_t npsizes = sz_psz2ind(max_alloc) + 1;
size_t sz_shards = opts->nshards * sizeof(sec_shard_t);
size_t sz_bins = opts->nshards * (size_t)npsizes * sizeof(sec_bin_t);
size_t sz_alloc = sz_shards + sz_bins;
void *dynalloc = base_alloc(tsdn, base, sz_alloc, CACHELINE);
void *dynalloc = base_alloc(tsdn, base, sz_alloc, CACHELINE);
if (dynalloc == NULL) {
return true;
}
@ -74,7 +74,6 @@ sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
assert((char *)bin_cur == ((char *)dynalloc + sz_alloc));
sec->fallback = fallback;
sec->opts = *opts;
sec->npsizes = npsizes;
@ -102,7 +101,7 @@ sec_shard_pick(tsdn_t *tsdn, sec_t *sec) {
if (tsdn_null(tsdn)) {
return &sec->shards[0];
}
tsd_t *tsd = tsdn_tsd(tsdn);
tsd_t *tsd = tsdn_tsd(tsdn);
uint8_t *idxp = tsd_sec_shardp_get(tsd);
if (*idxp == (uint8_t)-1) {
/*
@ -111,9 +110,10 @@ sec_shard_pick(tsdn_t *tsdn, sec_t *sec) {
* number to store 32 bits, since we'll deliberately overflow
* when we multiply by the number of shards.
*/
uint64_t rand32 = prng_lg_range_u64(tsd_prng_statep_get(tsd), 32);
uint32_t idx =
(uint32_t)((rand32 * (uint64_t)sec->opts.nshards) >> 32);
uint64_t rand32 = prng_lg_range_u64(
tsd_prng_statep_get(tsd), 32);
uint32_t idx = (uint32_t)((rand32 * (uint64_t)sec->opts.nshards)
>> 32);
assert(idx < (uint32_t)sec->opts.nshards);
*idxp = (uint8_t)idx;
}
@ -157,13 +157,13 @@ sec_flush_some_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
malloc_mutex_unlock(tsdn, &shard->mtx);
bool deferred_work_generated = false;
pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
&deferred_work_generated);
pai_dalloc_batch(
tsdn, sec->fallback, &to_flush, &deferred_work_generated);
}
static edata_t *
sec_shard_alloc_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
sec_bin_t *bin) {
sec_shard_alloc_locked(
tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, sec_bin_t *bin) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
if (!shard->enabled) {
return NULL;
@ -186,7 +186,7 @@ sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
edata_list_active_t result;
edata_list_active_init(&result);
bool deferred_work_generated = false;
bool deferred_work_generated = false;
size_t nalloc = pai_alloc_batch(tsdn, sec->fallback, size,
1 + sec->opts.batch_fill_extra, &result, frequent_reuse,
&deferred_work_generated);
@ -243,8 +243,8 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
assert(pszind < sec->npsizes);
sec_shard_t *shard = sec_shard_pick(tsdn, sec);
sec_bin_t *bin = &shard->bins[pszind];
bool do_batch_fill = false;
sec_bin_t *bin = &shard->bins[pszind];
bool do_batch_fill = false;
malloc_mutex_lock(tsdn, &shard->mtx);
edata_t *edata = sec_shard_alloc_locked(tsdn, sec, shard, bin);
@ -258,8 +258,8 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
malloc_mutex_unlock(tsdn, &shard->mtx);
if (edata == NULL) {
if (do_batch_fill) {
edata = sec_batch_fill_and_alloc(tsdn, sec, shard, bin,
size, frequent_reuse);
edata = sec_batch_fill_and_alloc(
tsdn, sec, shard, bin, size, frequent_reuse);
} else {
edata = pai_alloc(tsdn, sec->fallback, size, alignment,
zero, /* guarded */ false, frequent_reuse,
@ -304,16 +304,16 @@ sec_flush_all_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
* rare pathways.
*/
bool deferred_work_generated = false;
pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
&deferred_work_generated);
pai_dalloc_batch(
tsdn, sec->fallback, &to_flush, &deferred_work_generated);
}
static void
sec_shard_dalloc_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
edata_t *edata) {
sec_shard_dalloc_and_unlock(
tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
assert(shard->bytes_cur <= sec->opts.max_bytes);
size_t size = edata_size_get(edata);
size_t size = edata_size_get(edata);
pszind_t pszind = sz_psz2ind(size);
assert(pszind < sec->npsizes);
/*
@ -342,13 +342,12 @@ sec_shard_dalloc_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
}
static void
sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated) {
sec_dalloc(
tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) {
sec_t *sec = (sec_t *)self;
if (sec->opts.nshards == 0
|| edata_size_get(edata) > sec->opts.max_alloc) {
pai_dalloc(tsdn, sec->fallback, edata,
deferred_work_generated);
pai_dalloc(tsdn, sec->fallback, edata, deferred_work_generated);
return;
}
sec_shard_t *shard = sec_shard_pick(tsdn, sec);
@ -357,8 +356,7 @@ sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
sec_shard_dalloc_and_unlock(tsdn, sec, shard, edata);
} else {
malloc_mutex_unlock(tsdn, &shard->mtx);
pai_dalloc(tsdn, sec->fallback, edata,
deferred_work_generated);
pai_dalloc(tsdn, sec->fallback, edata, deferred_work_generated);
}
}
@ -398,12 +396,12 @@ sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats) {
}
void
sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
mutex_prof_data_t *mutex_prof_data) {
sec_mutex_stats_read(
tsdn_t *tsdn, sec_t *sec, mutex_prof_data_t *mutex_prof_data) {
for (size_t i = 0; i < sec->opts.nshards; i++) {
malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
malloc_mutex_prof_accum(tsdn, mutex_prof_data,
&sec->shards[i].mtx);
malloc_mutex_prof_accum(
tsdn, mutex_prof_data, &sec->shards[i].mtx);
malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
}
}

File diff suppressed because it is too large Load diff

View file

@ -3,12 +3,12 @@
#include "jemalloc/internal/sz.h"
JEMALLOC_ALIGNED(CACHELINE)
size_t sz_pind2sz_tab[SC_NPSIZES+1];
size_t sz_pind2sz_tab[SC_NPSIZES + 1];
size_t sz_large_pad;
size_t
sz_psz_quantize_floor(size_t size) {
size_t ret;
size_t ret;
pszind_t pind;
assert(size > 0);
@ -47,8 +47,8 @@ sz_psz_quantize_ceil(size_t size) {
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
sz_large_pad;
ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1))
+ sz_large_pad;
}
return ret;
}
@ -93,12 +93,12 @@ sz_boot_size2index_tab(const sc_data_t *sc_data) {
size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1;
size_t dst_ind = 0;
for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max;
sc_ind++) {
sc_ind++) {
const sc_t *sc = &sc_data->sc[sc_ind];
size_t sz = (ZU(1) << sc->lg_base)
size_t sz = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << sc->lg_delta);
size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1)
>> SC_LG_TINY_MIN);
>> SC_LG_TINY_MIN);
for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) {
assert(sc_ind < 1 << (sizeof(uint8_t) * 8));
sz_size2index_tab[dst_ind] = (uint8_t)sc_ind;

File diff suppressed because it is too large Load diff

View file

@ -16,7 +16,8 @@ te_ctx_has_active_events(te_ctx_t *ctx) {
}
} else {
for (int i = 0; i < te_dalloc_count; ++i) {
if (te_enabled_yes == te_dalloc_handlers[i]->enabled()) {
if (te_enabled_yes
== te_dalloc_handlers[i]->enabled()) {
return true;
}
}
@ -26,12 +27,11 @@ te_ctx_has_active_events(te_ctx_t *ctx) {
static uint64_t
te_next_event_compute(tsd_t *tsd, bool is_alloc) {
te_base_cb_t **handlers = is_alloc ?
te_alloc_handlers : te_dalloc_handlers;
uint64_t *waits = is_alloc ?
tsd_te_datap_get_unsafe(tsd)->alloc_wait :
tsd_te_datap_get_unsafe(tsd)->dalloc_wait;
int count = is_alloc ? te_alloc_count : te_dalloc_count;
te_base_cb_t **handlers = is_alloc ? te_alloc_handlers
: te_dalloc_handlers;
uint64_t *waits = is_alloc ? tsd_te_datap_get_unsafe(tsd)->alloc_wait
: tsd_te_datap_get_unsafe(tsd)->dalloc_wait;
int count = is_alloc ? te_alloc_count : te_dalloc_count;
uint64_t wait = TE_MAX_START_WAIT;
@ -86,9 +86,9 @@ te_assert_invariants_impl(tsd_t *tsd, te_ctx_t *ctx) {
* below is stronger than needed, but having an exactly accurate guard
* is more complicated to implement.
*/
assert((!te_ctx_has_active_events(ctx) && last_event == 0U) ||
interval == min_wait ||
(interval < min_wait && interval == TE_MAX_INTERVAL));
assert((!te_ctx_has_active_events(ctx) && last_event == 0U)
|| interval == min_wait
|| (interval < min_wait && interval == TE_MAX_INTERVAL));
}
void
@ -151,8 +151,9 @@ te_assert_invariants_debug(tsd_t *tsd) {
static void
te_ctx_next_event_fast_update(te_ctx_t *ctx) {
uint64_t next_event = te_ctx_next_event_get(ctx);
uint64_t next_event_fast = (next_event <= TE_NEXT_EVENT_FAST_MAX) ?
next_event : 0U;
uint64_t next_event_fast = (next_event <= TE_NEXT_EVENT_FAST_MAX)
? next_event
: 0U;
te_ctx_next_event_fast_set(ctx, next_event_fast);
}
@ -177,8 +178,7 @@ te_recompute_fast_threshold(tsd_t *tsd) {
}
static inline void
te_adjust_thresholds_impl(tsd_t *tsd, te_ctx_t *ctx,
uint64_t wait) {
te_adjust_thresholds_impl(tsd_t *tsd, te_ctx_t *ctx, uint64_t wait) {
/*
* The next threshold based on future events can only be adjusted after
* progressing the last_event counter (which is set to current).
@ -186,23 +186,22 @@ te_adjust_thresholds_impl(tsd_t *tsd, te_ctx_t *ctx,
assert(te_ctx_current_bytes_get(ctx) == te_ctx_last_event_get(ctx));
assert(wait <= TE_MAX_START_WAIT);
uint64_t next_event = te_ctx_last_event_get(ctx) + (wait <=
TE_MAX_INTERVAL ? wait : TE_MAX_INTERVAL);
uint64_t next_event = te_ctx_last_event_get(ctx)
+ (wait <= TE_MAX_INTERVAL ? wait : TE_MAX_INTERVAL);
te_ctx_next_event_set(tsd, ctx, next_event);
}
void
te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx,
uint64_t wait) {
te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx, uint64_t wait) {
te_adjust_thresholds_impl(tsd, ctx, wait);
}
static void
te_init_waits(tsd_t *tsd, uint64_t *wait, bool is_alloc) {
te_base_cb_t **handlers = is_alloc ? te_alloc_handlers : te_dalloc_handlers;
uint64_t *waits = is_alloc ?
tsd_te_datap_get_unsafe(tsd)->alloc_wait :
tsd_te_datap_get_unsafe(tsd)->dalloc_wait;
int count = is_alloc ? te_alloc_count : te_dalloc_count;
te_base_cb_t **handlers = is_alloc ? te_alloc_handlers
: te_dalloc_handlers;
uint64_t *waits = is_alloc ? tsd_te_datap_get_unsafe(tsd)->alloc_wait
: tsd_te_datap_get_unsafe(tsd)->dalloc_wait;
int count = is_alloc ? te_alloc_count : te_dalloc_count;
for (int i = 0; i < count; i++) {
if (te_enabled_yes == handlers[i]->enabled()) {
uint64_t ev_wait = handlers[i]->new_event_wait(tsd);
@ -216,25 +215,23 @@ te_init_waits(tsd_t *tsd, uint64_t *wait, bool is_alloc) {
}
static inline bool
te_update_wait(tsd_t *tsd, uint64_t accumbytes, bool allow,
uint64_t *ev_wait, uint64_t *wait, te_base_cb_t *handler,
uint64_t new_wait) {
te_update_wait(tsd_t *tsd, uint64_t accumbytes, bool allow, uint64_t *ev_wait,
uint64_t *wait, te_base_cb_t *handler, uint64_t new_wait) {
bool ret = false;
if (*ev_wait > accumbytes) {
*ev_wait -= accumbytes;
} else if (!allow) {
*ev_wait = handler->postponed_event_wait(tsd);
} else {
ret = true;
*ev_wait = new_wait == 0 ?
handler->new_event_wait(tsd) :
new_wait;
}
*ev_wait -= accumbytes;
} else if (!allow) {
*ev_wait = handler->postponed_event_wait(tsd);
} else {
ret = true;
*ev_wait = new_wait == 0 ? handler->new_event_wait(tsd)
: new_wait;
}
assert(*ev_wait > 0);
if (*ev_wait < *wait) {
*wait = *ev_wait;
}
assert(*ev_wait > 0);
if (*ev_wait < *wait) {
*wait = *ev_wait;
}
return ret;
}
@ -242,32 +239,32 @@ extern uint64_t stats_interval_accum_batch;
/* Return number of handlers enqueued into to_trigger array */
static inline size_t
te_update_alloc_events(tsd_t *tsd, te_base_cb_t **to_trigger,
uint64_t accumbytes, bool allow, uint64_t *wait) {
uint64_t accumbytes, bool allow, uint64_t *wait) {
/*
* We do not loop and invoke the functions via interface because
* of the perf cost. This path is relatively hot, so we sacrifice
* elegance for perf.
*/
size_t nto_trigger = 0;
size_t nto_trigger = 0;
uint64_t *waits = tsd_te_datap_get_unsafe(tsd)->alloc_wait;
if (opt_tcache_gc_incr_bytes > 0) {
assert(te_enabled_yes ==
te_alloc_handlers[te_alloc_tcache_gc]->enabled());
assert(te_enabled_yes
== te_alloc_handlers[te_alloc_tcache_gc]->enabled());
if (te_update_wait(tsd, accumbytes, allow,
&waits[te_alloc_tcache_gc], wait,
te_alloc_handlers[te_alloc_tcache_gc],
opt_tcache_gc_incr_bytes)) {
&waits[te_alloc_tcache_gc], wait,
te_alloc_handlers[te_alloc_tcache_gc],
opt_tcache_gc_incr_bytes)) {
to_trigger[nto_trigger++] =
te_alloc_handlers[te_alloc_tcache_gc];
}
}
#ifdef JEMALLOC_PROF
if (opt_prof) {
assert(te_enabled_yes ==
te_alloc_handlers[te_alloc_prof_sample]->enabled());
if(te_update_wait(tsd, accumbytes, allow,
&waits[te_alloc_prof_sample], wait,
te_alloc_handlers[te_alloc_prof_sample], 0)) {
if (opt_prof) {
assert(te_enabled_yes
== te_alloc_handlers[te_alloc_prof_sample]->enabled());
if (te_update_wait(tsd, accumbytes, allow,
&waits[te_alloc_prof_sample], wait,
te_alloc_handlers[te_alloc_prof_sample], 0)) {
to_trigger[nto_trigger++] =
te_alloc_handlers[te_alloc_prof_sample];
}
@ -275,12 +272,12 @@ te_update_alloc_events(tsd_t *tsd, te_base_cb_t **to_trigger,
#endif
if (opt_stats_interval >= 0) {
if (te_update_wait(tsd, accumbytes, allow,
&waits[te_alloc_stats_interval],
wait,
te_alloc_handlers[te_alloc_stats_interval],
stats_interval_accum_batch)) {
assert(te_enabled_yes ==
te_alloc_handlers[te_alloc_stats_interval]->enabled());
&waits[te_alloc_stats_interval], wait,
te_alloc_handlers[te_alloc_stats_interval],
stats_interval_accum_batch)) {
assert(te_enabled_yes
== te_alloc_handlers[te_alloc_stats_interval]
->enabled());
to_trigger[nto_trigger++] =
te_alloc_handlers[te_alloc_stats_interval];
}
@ -288,30 +285,30 @@ te_update_alloc_events(tsd_t *tsd, te_base_cb_t **to_trigger,
#ifdef JEMALLOC_STATS
assert(te_enabled_yes == te_alloc_handlers[te_alloc_peak]->enabled());
if(te_update_wait(tsd, accumbytes, allow, &waits[te_alloc_peak], wait,
te_alloc_handlers[te_alloc_peak], PEAK_EVENT_WAIT)) {
if (te_update_wait(tsd, accumbytes, allow, &waits[te_alloc_peak], wait,
te_alloc_handlers[te_alloc_peak], PEAK_EVENT_WAIT)) {
to_trigger[nto_trigger++] = te_alloc_handlers[te_alloc_peak];
}
}
assert(te_enabled_yes ==
te_alloc_handlers[te_alloc_prof_threshold]->enabled());
if(te_update_wait(tsd, accumbytes, allow,
&waits[te_alloc_prof_threshold], wait,
te_alloc_handlers[te_alloc_prof_threshold],
1 << opt_experimental_lg_prof_threshold)) {
assert(te_enabled_yes
== te_alloc_handlers[te_alloc_prof_threshold]->enabled());
if (te_update_wait(tsd, accumbytes, allow,
&waits[te_alloc_prof_threshold], wait,
te_alloc_handlers[te_alloc_prof_threshold],
1 << opt_experimental_lg_prof_threshold)) {
to_trigger[nto_trigger++] =
te_alloc_handlers[te_alloc_prof_threshold];
}
}
#endif
for (te_alloc_t ue = te_alloc_user0; ue <= te_alloc_user3; ue++) {
te_enabled_t status =
te_user_event_enabled(ue - te_alloc_user0, true);
te_enabled_t status = te_user_event_enabled(
ue - te_alloc_user0, true);
if (status == te_enabled_not_installed) {
break;
} else if (status == te_enabled_yes) {
if (te_update_wait(tsd, accumbytes, allow, &waits[ue],
wait, te_alloc_handlers[ue], 0)) {
wait, te_alloc_handlers[ue], 0)) {
to_trigger[nto_trigger++] =
te_alloc_handlers[ue];
}
@ -321,37 +318,36 @@ te_update_alloc_events(tsd_t *tsd, te_base_cb_t **to_trigger,
}
static inline size_t
te_update_dalloc_events(tsd_t *tsd, te_base_cb_t **to_trigger, uint64_t accumbytes,
bool allow, uint64_t *wait) {
size_t nto_trigger = 0;
te_update_dalloc_events(tsd_t *tsd, te_base_cb_t **to_trigger,
uint64_t accumbytes, bool allow, uint64_t *wait) {
size_t nto_trigger = 0;
uint64_t *waits = tsd_te_datap_get_unsafe(tsd)->dalloc_wait;
if (opt_tcache_gc_incr_bytes > 0) {
assert(te_enabled_yes ==
te_dalloc_handlers[te_dalloc_tcache_gc]->enabled());
assert(te_enabled_yes
== te_dalloc_handlers[te_dalloc_tcache_gc]->enabled());
if (te_update_wait(tsd, accumbytes, allow,
&waits[te_dalloc_tcache_gc], wait,
te_dalloc_handlers[te_dalloc_tcache_gc],
opt_tcache_gc_incr_bytes)) {
&waits[te_dalloc_tcache_gc], wait,
te_dalloc_handlers[te_dalloc_tcache_gc],
opt_tcache_gc_incr_bytes)) {
to_trigger[nto_trigger++] =
te_dalloc_handlers[te_dalloc_tcache_gc];
}
}
}
#ifdef JEMALLOC_STATS
assert(te_enabled_yes == te_dalloc_handlers[te_dalloc_peak]->enabled());
if(te_update_wait(tsd, accumbytes, allow, &waits[te_dalloc_peak], wait,
te_dalloc_handlers[te_dalloc_peak],
PEAK_EVENT_WAIT)) {
if (te_update_wait(tsd, accumbytes, allow, &waits[te_dalloc_peak], wait,
te_dalloc_handlers[te_dalloc_peak], PEAK_EVENT_WAIT)) {
to_trigger[nto_trigger++] = te_dalloc_handlers[te_dalloc_peak];
}
}
#endif
for (te_dalloc_t ue = te_dalloc_user0; ue <= te_dalloc_user3; ue++) {
te_enabled_t status =
te_user_event_enabled(ue - te_dalloc_user0, false);
te_enabled_t status = te_user_event_enabled(
ue - te_dalloc_user0, false);
if (status == te_enabled_not_installed) {
break;
} else if (status == te_enabled_yes) {
if (te_update_wait(tsd, accumbytes, allow, &waits[ue],
wait, te_dalloc_handlers[ue], 0)) {
wait, te_dalloc_handlers[ue], 0)) {
to_trigger[nto_trigger++] =
te_dalloc_handlers[ue];
}
@ -369,26 +365,22 @@ te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) {
te_ctx_last_event_set(ctx, bytes_after);
bool allow_event_trigger = tsd_nominal(tsd) &&
tsd_reentrancy_level_get(tsd) == 0;
bool allow_event_trigger = tsd_nominal(tsd)
&& tsd_reentrancy_level_get(tsd) == 0;
uint64_t wait = TE_MAX_START_WAIT;
assert((int)te_alloc_count >= (int) te_dalloc_count);
assert((int)te_alloc_count >= (int)te_dalloc_count);
te_base_cb_t *to_trigger[te_alloc_count];
size_t nto_trigger;
size_t nto_trigger;
if (ctx->is_alloc) {
nto_trigger = te_update_alloc_events(tsd, to_trigger,
accumbytes,
allow_event_trigger,
&wait);
nto_trigger = te_update_alloc_events(
tsd, to_trigger, accumbytes, allow_event_trigger, &wait);
} else {
nto_trigger = te_update_dalloc_events(tsd, to_trigger,
accumbytes,
allow_event_trigger,
&wait);
nto_trigger = te_update_dalloc_events(
tsd, to_trigger, accumbytes, allow_event_trigger, &wait);
}
assert(wait <= TE_MAX_START_WAIT);
assert(wait <= TE_MAX_START_WAIT);
te_adjust_thresholds_helper(tsd, ctx, wait);
te_assert_invariants(tsd);

View file

@ -145,34 +145,25 @@ TE_USER_HANDLER_BINDING_IDX(3);
/* Table of all the thread events. */
te_base_cb_t *te_alloc_handlers[te_alloc_count] = {
#ifdef JEMALLOC_PROF
&prof_sample_te_handler,
&prof_sample_te_handler,
#endif
&stats_interval_te_handler,
&tcache_gc_te_handler,
&stats_interval_te_handler, &tcache_gc_te_handler,
#ifdef JEMALLOC_STATS
&prof_threshold_te_handler,
&peak_te_handler,
&prof_threshold_te_handler, &peak_te_handler,
#endif
&user_alloc_handler0,
&user_alloc_handler1,
&user_alloc_handler2,
&user_alloc_handler3
};
&user_alloc_handler0, &user_alloc_handler1, &user_alloc_handler2,
&user_alloc_handler3};
te_base_cb_t *te_dalloc_handlers[te_dalloc_count] = {
&tcache_gc_te_handler,
te_base_cb_t *te_dalloc_handlers[te_dalloc_count] = {&tcache_gc_te_handler,
#ifdef JEMALLOC_STATS
&peak_te_handler,
&peak_te_handler,
#endif
&user_dalloc_handler0,
&user_dalloc_handler1,
&user_dalloc_handler2,
&user_dalloc_handler3
};
&user_dalloc_handler0, &user_dalloc_handler1, &user_dalloc_handler2,
&user_dalloc_handler3};
static inline bool
te_update_tsd(tsd_t *tsd, uint64_t new_wait, size_t ue_idx, bool is_alloc) {
bool needs_recompute = false;
bool needs_recompute = false;
te_ctx_t ctx;
uint64_t next, current, cur_wait;

View file

@ -20,13 +20,8 @@
* The values here are computed in src/ticker.py
*/
const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS] = {
254, 211, 187, 169, 156, 144, 135, 127,
120, 113, 107, 102, 97, 93, 89, 85,
81, 77, 74, 71, 68, 65, 62, 60,
57, 55, 53, 50, 48, 46, 44, 42,
40, 39, 37, 35, 33, 32, 30, 29,
27, 26, 24, 23, 21, 20, 19, 18,
16, 15, 14, 13, 12, 10, 9, 8,
7, 6, 5, 4, 3, 2, 1, 0
};
const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS] = {254, 211, 187, 169,
156, 144, 135, 127, 120, 113, 107, 102, 97, 93, 89, 85, 81, 77, 74, 71, 68,
65, 62, 60, 57, 55, 53, 50, 48, 46, 44, 42, 40, 39, 37, 35, 33, 32, 30, 29,
27, 26, 24, 23, 21, 20, 19, 18, 16, 15, 14, 13, 12, 10, 9, 8, 7, 6, 5, 4, 3,
2, 1, 0};

123
src/tsd.c
View file

@ -20,19 +20,20 @@ bool tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
pthread_key_t tsd_tsd;
bool tsd_booted = false;
bool tsd_booted = false;
#elif (defined(_WIN32))
#if defined(JEMALLOC_LEGACY_WINDOWS_SUPPORT) || !defined(_MSC_VER)
DWORD tsd_tsd;
# if defined(JEMALLOC_LEGACY_WINDOWS_SUPPORT) || !defined(_MSC_VER)
DWORD tsd_tsd;
tsd_wrapper_t tsd_boot_wrapper = {TSD_INITIALIZER, false};
#else
JEMALLOC_TSD_TYPE_ATTR(tsd_wrapper_t) tsd_wrapper_tls = { TSD_INITIALIZER, false };
#endif
# else
JEMALLOC_TSD_TYPE_ATTR(tsd_wrapper_t)
tsd_wrapper_tls = {TSD_INITIALIZER, false};
# endif
bool tsd_booted = false;
#if JEMALLOC_WIN32_TLSGETVALUE2
TGV2 tls_get_value2 = NULL;
# if JEMALLOC_WIN32_TLSGETVALUE2
TGV2 tls_get_value2 = NULL;
HMODULE tgv2_mod = NULL;
#endif
# endif
#else
/*
@ -45,17 +46,12 @@ struct tsd_init_head_s {
malloc_mutex_t lock;
};
pthread_key_t tsd_tsd;
tsd_init_head_t tsd_init_head = {
ql_head_initializer(blocks),
MALLOC_MUTEX_INITIALIZER
};
pthread_key_t tsd_tsd;
tsd_init_head_t tsd_init_head = {
ql_head_initializer(blocks), MALLOC_MUTEX_INITIALIZER};
tsd_wrapper_t tsd_boot_wrapper = {
false,
TSD_INITIALIZER
};
bool tsd_booted = false;
tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
bool tsd_booted = false;
#endif
JEMALLOC_DIAGNOSTIC_POP
@ -64,7 +60,7 @@ JEMALLOC_DIAGNOSTIC_POP
/* A list of all the tsds in the nominal state. */
typedef ql_head(tsd_t) tsd_list_t;
static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds);
static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds);
static malloc_mutex_t tsd_nominal_tsds_lock;
/* How many slow-path-enabling features are turned on. */
@ -73,13 +69,13 @@ static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0);
static bool
tsd_in_nominal_list(tsd_t *tsd) {
tsd_t *tsd_list;
bool found = false;
bool found = false;
/*
* We don't know that tsd is nominal; it might not be safe to get data
* out of it here.
*/
malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock);
ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
ql_foreach (tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
if (tsd == tsd_list) {
found = true;
break;
@ -117,7 +113,7 @@ tsd_force_recompute(tsdn_t *tsdn) {
atomic_fence(ATOMIC_RELEASE);
malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock);
tsd_t *remote_tsd;
ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
ql_foreach (remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED)
<= tsd_state_nominal_max);
tsd_atomic_store(&remote_tsd->state,
@ -143,7 +139,8 @@ tsd_global_slow_inc(tsdn_t *tsdn) {
tsd_force_recompute(tsdn);
}
void tsd_global_slow_dec(tsdn_t *tsdn) {
void
tsd_global_slow_dec(tsdn_t *tsdn) {
atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
/* See the note in ..._inc(). */
tsd_force_recompute(tsdn);
@ -180,8 +177,8 @@ tsd_slow_update(tsd_t *tsd) {
uint8_t old_state;
do {
uint8_t new_state = tsd_state_compute(tsd);
old_state = tsd_atomic_exchange(&tsd->state, new_state,
ATOMIC_ACQUIRE);
old_state = tsd_atomic_exchange(
&tsd->state, new_state, ATOMIC_ACQUIRE);
} while (old_state == tsd_state_nominal_recompute);
te_recompute_fast_threshold(tsd);
@ -211,8 +208,8 @@ tsd_state_set(tsd_t *tsd, uint8_t new_state) {
assert(tsd_in_nominal_list(tsd));
if (new_state > tsd_state_nominal_max) {
tsd_remove_nominal(tsd);
tsd_atomic_store(&tsd->state, new_state,
ATOMIC_RELAXED);
tsd_atomic_store(
&tsd->state, new_state, ATOMIC_RELAXED);
} else {
/*
* This is the tricky case. We're transitioning from
@ -235,8 +232,7 @@ tsd_prng_state_init(tsd_t *tsd) {
* cost of test repeatability. For debug builds, instead use a
* deterministic seed.
*/
*tsd_prng_statep_get(tsd) = config_debug ? 0 :
(uint64_t)(uintptr_t)tsd;
*tsd_prng_statep_get(tsd) = config_debug ? 0 : (uint64_t)(uintptr_t)tsd;
}
static bool
@ -264,8 +260,8 @@ assert_tsd_data_cleanup_done(tsd_t *tsd) {
static bool
tsd_data_init_nocleanup(tsd_t *tsd) {
assert(tsd_state_get(tsd) == tsd_state_reincarnated ||
tsd_state_get(tsd) == tsd_state_minimal_initialized);
assert(tsd_state_get(tsd) == tsd_state_reincarnated
|| tsd_state_get(tsd) == tsd_state_minimal_initialized);
/*
* During reincarnation, there is no guarantee that the cleanup function
* will be called (deallocation may happen after all tsd destructors).
@ -358,15 +354,15 @@ malloc_tsd_dalloc(void *wrapper) {
}
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
static unsigned ncleanups;
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
#ifndef _WIN32
# ifndef _WIN32
JEMALLOC_EXPORT
#endif
# endif
void
_malloc_thread_cleanup(void) {
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
unsigned i;
for (i = 0; i < ncleanups; i++) {
@ -386,9 +382,9 @@ _malloc_thread_cleanup(void) {
} while (again);
}
#ifndef _WIN32
# ifndef _WIN32
JEMALLOC_EXPORT
#endif
# endif
void
_malloc_tsd_cleanup_register(bool (*f)(void)) {
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
@ -446,7 +442,7 @@ tsd_cleanup(void *arg) {
}
#ifdef JEMALLOC_JET
test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd);
int *data = tsd_test_datap_get_unsafe(tsd);
int *data = tsd_test_datap_get_unsafe(tsd);
if (test_callback != NULL) {
test_callback(data);
}
@ -461,7 +457,7 @@ malloc_tsd_boot0(void) {
ncleanups = 0;
#endif
if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock",
WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) {
return NULL;
}
if (tsd_boot0()) {
@ -483,11 +479,11 @@ malloc_tsd_boot1(void) {
static BOOL WINAPI
_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
switch (fdwReason) {
#ifdef JEMALLOC_LAZY_LOCK
# ifdef JEMALLOC_LAZY_LOCK
case DLL_THREAD_ATTACH:
isthreaded = true;
break;
#endif
# endif
case DLL_THREAD_DETACH:
_malloc_thread_cleanup();
break;
@ -502,35 +498,36 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
* hooked "read". We won't read for the rest of the file, so we can get away
* with unhooking.
*/
#ifdef read
# undef read
# ifdef read
# undef read
# endif
# ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
# pragma comment(linker, "/INCLUDE:_tls_callback")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
# pragma comment( \
linker, "/INCLUDE:" STRINGIFY(tls_callback))
# endif
# pragma section(".CRT$XLY", long, read)
# endif
JEMALLOC_SECTION(".CRT$XLY")
JEMALLOC_ATTR(used) BOOL(WINAPI *const tls_callback)(
HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
# pragma comment(linker, "/INCLUDE:_tls_callback")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
# pragma comment(linker, "/INCLUDE:" STRINGIFY(tls_callback) )
# endif
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) \
&& !defined(_WIN32))
void *
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
pthread_t self = pthread_self();
pthread_t self = pthread_self();
tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */
malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_foreach(iter, &head->blocks, link) {
ql_foreach (iter, &head->blocks, link) {
if (iter->thread == self) {
malloc_mutex_unlock(TSDN_NULL, &head->lock);
return iter->data;

View file

@ -8,8 +8,8 @@ bool
multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left,
size_t *key_start, size_t *key_end, size_t *value) {
const char *cur = *setting_segment_cur;
char *end;
uintmax_t um;
char *end;
uintmax_t um;
set_errno(0);
@ -46,4 +46,3 @@ multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left,
return false;
}

View file

@ -26,8 +26,8 @@ witness_print_witness(witness_t *w, unsigned n) {
static void
witness_print_witnesses(const witness_list_t *witnesses) {
witness_t *w, *last = NULL;
unsigned n = 0;
ql_foreach(w, witnesses, link) {
unsigned n = 0;
ql_foreach (w, witnesses, link) {
if (last != NULL && w->rank > last->rank) {
assert(w->name != last->name);
witness_print_witness(last, n);
@ -45,8 +45,8 @@ witness_print_witnesses(const witness_list_t *witnesses) {
}
static void
witness_lock_error_impl(const witness_list_t *witnesses,
const witness_t *witness) {
witness_lock_error_impl(
const witness_list_t *witnesses, const witness_t *witness) {
malloc_printf("<jemalloc>: Lock rank order reversal:");
witness_print_witnesses(witnesses);
malloc_printf(" %s(%u)\n", witness->name, witness->rank);
@ -56,8 +56,8 @@ witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl;
static void
witness_owner_error_impl(const witness_t *witness) {
malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
witness->rank);
malloc_printf(
"<jemalloc>: Should own %s(%u)\n", witness->name, witness->rank);
abort();
}
witness_owner_error_t *JET_MUTABLE witness_owner_error =
@ -76,7 +76,7 @@ static void
witness_depth_error_impl(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth) {
malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
(depth != 1) ? "s" : "", rank_inclusive);
(depth != 1) ? "s" : "", rank_inclusive);
witness_print_witnesses(witnesses);
malloc_printf("\n");
abort();

View file

@ -4,7 +4,7 @@
#include "jemalloc/internal/assert.h"
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
# error "This source file is for zones on Darwin (OS X)."
#endif
/* Definitions of the following structs in malloc/malloc.h might be too old
@ -22,10 +22,11 @@ typedef struct _malloc_zone_t {
void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
void (*destroy)(struct _malloc_zone_t *);
const char *zone_name;
unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
unsigned (*batch_malloc)(
struct _malloc_zone_t *, size_t, void **, unsigned);
void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
struct malloc_introspection_t *introspect;
unsigned version;
unsigned version;
void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
@ -33,22 +34,24 @@ typedef struct _malloc_zone_t {
typedef struct {
vm_address_t address;
vm_size_t size;
vm_size_t size;
} vm_range_t;
typedef struct malloc_statistics_t {
unsigned blocks_in_use;
size_t size_in_use;
size_t max_size_in_use;
size_t size_allocated;
size_t size_in_use;
size_t max_size_in_use;
size_t size_allocated;
} malloc_statistics_t;
typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
typedef void vm_range_recorder_t(
task_t, void *, unsigned type, vm_range_t *, unsigned);
typedef struct malloc_introspection_t {
kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t,
memory_reader_t, vm_range_recorder_t);
size_t (*good_size)(malloc_zone_t *, size_t);
boolean_t (*check)(malloc_zone_t *);
void (*print)(malloc_zone_t *, boolean_t);
@ -61,14 +64,16 @@ typedef struct malloc_introspection_t {
boolean_t (*disable_discharge_checking)(malloc_zone_t *);
void (*discharge)(malloc_zone_t *, void *);
#ifdef __BLOCKS__
void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
void (*enumerate_discharged_pointers)(
malloc_zone_t *, void (^)(void *, void *));
#else
void *enumerate_unavailable_without_blocks;
#endif
void (*reinit_lock)(malloc_zone_t *);
} malloc_introspection_t;
extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
extern kern_return_t malloc_get_all_zones(
task_t, memory_reader_t, vm_address_t **, unsigned *);
extern malloc_zone_t *malloc_default_zone(void);
@ -81,48 +86,46 @@ extern void malloc_zone_unregister(malloc_zone_t *zone);
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);
JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;
static pid_t zone_force_lock_pid = -1;
static pid_t zone_force_lock_pid = -1;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t zone_size(malloc_zone_t *zone, const void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
static void zone_destroy(malloc_zone_t *zone);
static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
static size_t zone_size(malloc_zone_t *zone, const void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static void *zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size);
static void zone_free_definite_size(
malloc_zone_t *zone, void *ptr, size_t size);
static void zone_destroy(malloc_zone_t *zone);
static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
void **results, unsigned num_requested);
static void zone_batch_free(struct _malloc_zone_t *zone,
void **to_be_freed, unsigned num_to_be_freed);
static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask,
vm_address_t zone_address, memory_reader_t reader,
static void zone_batch_free(
struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed);
static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static kern_return_t zone_enumerator(task_t task, void *data,
unsigned type_mask, vm_address_t zone_address, memory_reader_t reader,
vm_range_recorder_t recorder);
static boolean_t zone_check(malloc_zone_t *zone);
static void zone_print(malloc_zone_t *zone, boolean_t verbose);
static void zone_log(malloc_zone_t *zone, void *address);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
static void zone_statistics(malloc_zone_t *zone,
malloc_statistics_t *stats);
static boolean_t zone_locked(malloc_zone_t *zone);
static void zone_reinit_lock(malloc_zone_t *zone);
static boolean_t zone_check(malloc_zone_t *zone);
static void zone_print(malloc_zone_t *zone, boolean_t verbose);
static void zone_log(malloc_zone_t *zone, void *address);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
static void zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats);
static boolean_t zone_locked(malloc_zone_t *zone);
static void zone_reinit_lock(malloc_zone_t *zone);
/******************************************************************************/
/*
@ -225,8 +228,8 @@ zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
}
static void
zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
unsigned num_to_be_freed) {
zone_batch_free(
struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed) {
unsigned i;
for (i = 0; i < num_to_be_freed; i++) {
@ -261,12 +264,10 @@ zone_check(malloc_zone_t *zone) {
}
static void
zone_print(malloc_zone_t *zone, boolean_t verbose) {
}
zone_print(malloc_zone_t *zone, boolean_t verbose) {}
static void
zone_log(malloc_zone_t *zone, void *address) {
}
zone_log(malloc_zone_t *zone, void *address) {}
static void
zone_force_lock(malloc_zone_t *zone) {
@ -369,7 +370,7 @@ zone_init(void) {
static malloc_zone_t *
zone_default_get(void) {
malloc_zone_t **zones = NULL;
unsigned int num_zones = 0;
unsigned int num_zones = 0;
/*
* On OSX 10.12, malloc_default_zone returns a special zone that is not
@ -380,8 +381,9 @@ zone_default_get(void) {
* zone is the default. So get the list of zones to get the first one,
* instead of relying on malloc_default_zone.
*/
if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
(vm_address_t**)&zones, &num_zones)) {
if (KERN_SUCCESS
!= malloc_get_all_zones(
0, NULL, (vm_address_t **)&zones, &num_zones)) {
/*
* Reset the value in case the failure happened after it was
* set.
@ -441,8 +443,8 @@ zone_register(void) {
* register jemalloc's.
*/
default_zone = zone_default_get();
if (!default_zone->zone_name || strcmp(default_zone->zone_name,
"DefaultMallocZone") != 0) {
if (!default_zone->zone_name
|| strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
return;
}
@ -457,8 +459,9 @@ zone_register(void) {
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
malloc_default_purgeable_zone();
purgeable_zone = (malloc_default_purgeable_zone == NULL)
? NULL
: malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
zone_init();