Reformat the codebase with the clang-format 18.

This commit is contained in:
guangli-dai 2025-06-13 12:31:12 -07:00 committed by Guangli Dai
parent 0a6215c171
commit f1bba4a87c
346 changed files with 18286 additions and 17770 deletions

View file

@ -13,13 +13,14 @@
*
* The calls to this thunk get driven by the peak_event module.
*/
#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL}
typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
uint64_t deallocated);
#define ACTIVITY_CALLBACK_THUNK_INITIALIZER \
{ NULL, NULL }
typedef void (*activity_callback_t)(
void *uctx, uint64_t allocated, uint64_t deallocated);
typedef struct activity_callback_thunk_s activity_callback_thunk_t;
struct activity_callback_thunk_s {
activity_callback_t callback;
void *uctx;
void *uctx;
};
#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */

View file

@ -21,7 +21,7 @@ extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;
extern percpu_arena_mode_t opt_percpu_arena;
extern const char *const percpu_arena_mode_names[];
extern const char *const percpu_arena_mode_names[];
extern div_info_t arena_binind_div_info[SC_NBINS];
@ -30,7 +30,7 @@ extern emap_t arena_emap_global;
extern size_t opt_oversize_threshold;
extern size_t oversize_threshold;
extern bool opt_huge_arena_pac_thp;
extern bool opt_huge_arena_pac_thp;
extern pac_thp_t huge_arena_pac_thp;
/*
@ -39,90 +39,90 @@ extern pac_thp_t huge_arena_pac_thp;
*/
extern uint32_t arena_bin_offsets[SC_NBINS];
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
bin_stats_data_t *bstats, arena_stats_large_t *lstats,
pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
bin_stats_data_t *bstats, arena_stats_large_t *lstats, pac_estats_t *estats,
hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
edata_t *edata);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
edata_t *edata, size_t oldusize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
edata_t *edata, size_t oldusize);
bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
ssize_t decay_ms);
edata_t *arena_extent_alloc_large(
tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero);
void arena_extent_dalloc_large_prep(
tsdn_t *tsdn, arena_t *arena, edata_t *edata);
void arena_extent_ralloc_large_shrink(
tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize);
void arena_extent_ralloc_large_expand(
tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize);
bool arena_decay_ms_set(
tsdn_t *tsdn, arena_t *arena, extent_state_t state, ssize_t decay_ms);
ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all);
void arena_decay(
tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all);
uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
cache_bin_t *cache_bin, szind_t binind, const cache_bin_sz_t nfill_min,
const cache_bin_sz_t nfill_max);
void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
cache_bin_t *cache_bin, szind_t binind, const cache_bin_sz_t nfill_min,
const cache_bin_sz_t nfill_max);
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero, bool slab);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, bool slab, tcache_t *tcache);
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize,
size_t bumped_usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool slow_path);
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero, bool slab);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero, bool slab, tcache_t *tcache);
void arena_prof_promote(
tsdn_t *tsdn, void *ptr, size_t usize, size_t bumped_usize);
void arena_dalloc_promoted(
tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
edata_t *slab, bin_t *bin);
void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
edata_t *slab, bin_t *bin);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero, size_t *newsize);
void arena_dalloc_bin_locked_handle_newly_empty(
tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin);
void arena_dalloc_bin_locked_handle_newly_nonempty(
tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero, size_t *newsize);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
dss_prec_t arena_dss_prec_get(arena_t *arena);
ehooks_t *arena_get_ehooks(arena_t *arena);
extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
extent_hooks_t *extent_hooks);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
void arena_name_get(arena_t *arena, char *name);
void arena_name_set(arena_t *arena, const char *name);
dss_prec_t arena_dss_prec_get(arena_t *arena);
ehooks_t *arena_get_ehooks(arena_t *arena);
extent_hooks_t *arena_set_extent_hooks(
tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
void arena_name_get(arena_t *arena, char *name);
void arena_name_set(arena_t *arena, const char *name);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_default_get(void);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
size_t *old_limit, size_t *new_limit);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
bool arena_retain_grow_limit_get_set(
tsd_t *tsd, arena_t *arena, size_t *old_limit, size_t *new_limit);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
bool arena_init_huge(tsdn_t *tsdn, arena_t *a0);
bool arena_init_huge(tsdn_t *tsdn, arena_t *a0);
arena_t *arena_choose_huge(tsd_t *tsd);
bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
unsigned *binshard);
bin_t *arena_bin_choose(
tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned *binshard);
size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
void **ptrs, size_t nfill, bool zero);
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */

View file

@ -21,8 +21,8 @@
static inline arena_t *
arena_get_from_edata(edata_t *edata) {
return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
ATOMIC_RELAXED);
return (arena_t *)atomic_load_p(
&arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE arena_t *
@ -61,15 +61,17 @@ large_dalloc_safety_checks(edata_t *edata, const void *ptr, size_t input_size) {
* The cost is low enough (as edata will be accessed anyway) to be
* enabled all the time.
*/
if (unlikely(edata == NULL ||
edata_state_get(edata) != extent_state_active)) {
safety_check_fail("Invalid deallocation detected: "
if (unlikely(edata == NULL
|| edata_state_get(edata) != extent_state_active)) {
safety_check_fail(
"Invalid deallocation detected: "
"pages being freed (%p) not currently active, "
"possibly caused by double free bugs.", ptr);
"possibly caused by double free bugs.",
ptr);
return true;
}
if (unlikely(input_size != edata_usize_get(edata) ||
input_size > SC_LARGE_MAXCLASS)) {
if (unlikely(input_size != edata_usize_get(edata)
|| input_size > SC_LARGE_MAXCLASS)) {
safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
/* true_size */ edata_usize_get(edata), input_size);
return true;
@ -86,25 +88,26 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
assert(prof_info != NULL);
edata_t *edata = NULL;
bool is_slab;
bool is_slab;
/* Static check. */
if (alloc_ctx == NULL) {
edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
ptr);
edata = emap_edata_lookup(
tsd_tsdn(tsd), &arena_emap_global, ptr);
is_slab = edata_slab_get(edata);
} else if (unlikely(!(is_slab = alloc_ctx->slab))) {
edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
ptr);
edata = emap_edata_lookup(
tsd_tsdn(tsd), &arena_emap_global, ptr);
}
if (unlikely(!is_slab)) {
/* edata must have been initialized at this point. */
assert(edata != NULL);
size_t usize = (alloc_ctx == NULL)? edata_usize_get(edata):
emap_alloc_ctx_usize_get(alloc_ctx);
if (reset_recent &&
large_dalloc_safety_checks(edata, ptr, usize)) {
size_t usize = (alloc_ctx == NULL)
? edata_usize_get(edata)
: emap_alloc_ctx_usize_get(alloc_ctx);
if (reset_recent
&& large_dalloc_safety_checks(edata, ptr, usize)) {
prof_info->alloc_tctx = PROF_TCTX_SENTINEL;
return;
}
@ -119,22 +122,22 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
}
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
emap_alloc_ctx_t *alloc_ctx) {
arena_prof_tctx_reset(
tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
assert(ptr != NULL);
/* Static check. */
if (alloc_ctx == NULL) {
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
&arena_emap_global, ptr);
edata_t *edata = emap_edata_lookup(
tsd_tsdn(tsd), &arena_emap_global, ptr);
if (unlikely(!edata_slab_get(edata))) {
large_prof_tctx_reset(edata);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
&arena_emap_global, ptr);
edata_t *edata = emap_edata_lookup(
tsd_tsdn(tsd), &arena_emap_global, ptr);
large_prof_tctx_reset(edata);
}
}
@ -145,16 +148,16 @@ arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
ptr);
edata_t *edata = emap_edata_lookup(
tsd_tsdn(tsd), &arena_emap_global, ptr);
assert(!edata_slab_get(edata));
large_prof_tctx_reset(edata);
}
JEMALLOC_ALWAYS_INLINE void
arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx,
size_t size) {
arena_prof_info_set(
tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) {
cassert(config_prof);
assert(!edata_slab_get(edata));
@ -177,9 +180,9 @@ arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
* use a single ticker for all of them.
*/
ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd);
uint64_t *prng_state = tsd_prng_statep_get(tsd);
uint64_t *prng_state = tsd_prng_statep_get(tsd);
if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks,
tsd_reentrancy_level_get(tsd) > 0))) {
tsd_reentrancy_level_get(tsd) > 0))) {
arena_decay(tsdn, arena, false, false);
}
}
@ -197,14 +200,13 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
if (likely(tcache != NULL)) {
if (likely(slab)) {
assert(sz_can_use_slab(size));
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
} else if (likely(
ind < tcache_nbins_get(tcache->tcache_slow) &&
!tcache_bin_disabled(ind, &tcache->bins[ind],
tcache->tcache_slow))) {
return tcache_alloc_large(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
return tcache_alloc_small(tsdn_tsd(tsdn), arena, tcache,
size, ind, zero, slow_path);
} else if (likely(ind < tcache_nbins_get(tcache->tcache_slow)
&& !tcache_bin_disabled(ind, &tcache->bins[ind],
tcache->tcache_slow))) {
return tcache_alloc_large(tsdn_tsd(tsdn), arena, tcache,
size, ind, zero, slow_path);
}
/* (size > tcache_max) case falls through. */
}
@ -241,8 +243,8 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
*/
emap_full_alloc_ctx_t full_alloc_ctx;
bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
ptr, &full_alloc_ctx);
bool missing = emap_full_alloc_ctx_try_lookup(
tsdn, &arena_emap_global, ptr, &full_alloc_ctx);
if (missing) {
return 0;
}
@ -261,8 +263,8 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
}
static inline void
arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind,
size_t usize) {
arena_dalloc_large_no_tcache(
tsdn_t *tsdn, void *ptr, szind_t szind, size_t usize) {
/*
* szind is still needed in this function mainly becuase
* szind < SC_NBINS determines not only if this is a small alloc,
@ -272,8 +274,8 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind,
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, NULL, true);
} else {
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
ptr);
edata_t *edata = emap_edata_lookup(
tsdn, &arena_emap_global, ptr);
if (large_dalloc_safety_checks(edata, ptr, usize)) {
/* See the comment in isfree. */
return;
@ -290,13 +292,13 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
if (config_debug) {
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
ptr);
edata_t *edata = emap_edata_lookup(
tsdn, &arena_emap_global, ptr);
assert(alloc_ctx.szind == edata_szind_get(edata));
assert(alloc_ctx.szind < SC_NSIZES);
assert(alloc_ctx.slab == edata_slab_get(edata));
assert(emap_alloc_ctx_usize_get(&alloc_ctx) ==
edata_usize_get(edata));
assert(emap_alloc_ctx_usize_get(&alloc_ctx)
== edata_usize_get(edata));
}
if (likely(alloc_ctx.slab)) {
@ -311,19 +313,19 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
JEMALLOC_ALWAYS_INLINE void
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
size_t usize, bool slow_path) {
assert (!tsdn_null(tsdn) && tcache != NULL);
assert(!tsdn_null(tsdn) && tcache != NULL);
bool is_sample_promoted = config_prof && szind < SC_NBINS;
if (unlikely(is_sample_promoted)) {
arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
} else {
if (szind < tcache_nbins_get(tcache->tcache_slow) &&
!tcache_bin_disabled(szind, &tcache->bins[szind],
tcache->tcache_slow)) {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
if (szind < tcache_nbins_get(tcache->tcache_slow)
&& !tcache_bin_disabled(
szind, &tcache->bins[szind], tcache->tcache_slow)) {
tcache_dalloc_large(
tsdn_tsd(tsdn), tcache, ptr, szind, slow_path);
} else {
edata_t *edata = emap_edata_lookup(tsdn,
&arena_emap_global, ptr);
edata_t *edata = emap_edata_lookup(
tsdn, &arena_emap_global, ptr);
if (large_dalloc_safety_checks(edata, ptr, usize)) {
/* See the comment in isfree. */
return;
@ -335,16 +337,17 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
/* Find the region index of a pointer. */
JEMALLOC_ALWAYS_INLINE size_t
arena_slab_regind_impl(div_info_t* div_info, szind_t binind,
edata_t *slab, const void *ptr) {
arena_slab_regind_impl(
div_info_t *div_info, szind_t binind, edata_t *slab, const void *ptr) {
size_t diff, regind;
/* Freeing a pointer outside the slab can cause assertion failure. */
assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
(uintptr_t)bin_infos[binind].reg_size == 0);
assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab))
% (uintptr_t)bin_infos[binind].reg_size
== 0);
diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
@ -360,22 +363,23 @@ arena_tcache_dalloc_small_safety_check(tsdn_t *tsdn, void *ptr) {
if (!config_debug) {
return false;
}
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
szind_t binind = edata_szind_get(edata);
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
szind_t binind = edata_szind_get(edata);
div_info_t div_info = arena_binind_div_info[binind];
/*
* Calls the internal function arena_slab_regind_impl because the
* safety check does not require a lock.
*/
size_t regind = arena_slab_regind_impl(&div_info, binind, edata, ptr);
slab_data_t *slab_data = edata_slab_data_get(edata);
slab_data_t *slab_data = edata_slab_data_get(edata);
const bin_info_t *bin_info = &bin_infos[binind];
assert(edata_nfree_get(edata) < bin_info->nregs);
if (unlikely(!bitmap_get(slab_data->bitmap, &bin_info->bitmap_info,
regind))) {
if (unlikely(!bitmap_get(
slab_data->bitmap, &bin_info->bitmap_info, regind))) {
safety_check_fail(
"Invalid deallocation detected: the pointer being freed (%p) not "
"currently active, possibly caused by double free bugs.\n", ptr);
"currently active, possibly caused by double free bugs.\n",
ptr);
return true;
}
return false;
@ -397,18 +401,18 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
alloc_ctx = *caller_alloc_ctx;
} else {
util_assume(tsdn != NULL);
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
&alloc_ctx);
emap_alloc_ctx_lookup(
tsdn, &arena_emap_global, ptr, &alloc_ctx);
}
if (config_debug) {
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
ptr);
edata_t *edata = emap_edata_lookup(
tsdn, &arena_emap_global, ptr);
assert(alloc_ctx.szind == edata_szind_get(edata));
assert(alloc_ctx.szind < SC_NSIZES);
assert(alloc_ctx.slab == edata_slab_get(edata));
assert(emap_alloc_ctx_usize_get(&alloc_ctx) ==
edata_usize_get(edata));
assert(emap_alloc_ctx_usize_get(&alloc_ctx)
== edata_usize_get(edata));
}
if (likely(alloc_ctx.slab)) {
@ -416,8 +420,8 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
if (arena_tcache_dalloc_small_safety_check(tsdn, ptr)) {
return;
}
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
alloc_ctx.szind, slow_path);
tcache_dalloc_small(
tsdn_tsd(tsdn), tcache, ptr, alloc_ctx.szind, slow_path);
} else {
arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
emap_alloc_ctx_usize_get(&alloc_ctx), slow_path);
@ -436,21 +440,21 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
* object, so base szind and slab on the given size.
*/
szind_t szind = sz_size2index(size);
emap_alloc_ctx_init(&alloc_ctx, szind, (szind < SC_NBINS),
size);
emap_alloc_ctx_init(
&alloc_ctx, szind, (szind < SC_NBINS), size);
}
if ((config_prof && opt_prof) || config_debug) {
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
&alloc_ctx);
emap_alloc_ctx_lookup(
tsdn, &arena_emap_global, ptr, &alloc_ctx);
assert(alloc_ctx.szind == sz_size2index(size));
assert((config_prof && opt_prof)
|| alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
if (config_debug) {
edata_t *edata = emap_edata_lookup(tsdn,
&arena_emap_global, ptr);
edata_t *edata = emap_edata_lookup(
tsdn, &arena_emap_global, ptr);
assert(alloc_ctx.szind == edata_szind_get(edata));
assert(alloc_ctx.slab == edata_slab_get(edata));
}
@ -481,8 +485,8 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
if (config_prof && opt_prof) {
if (caller_alloc_ctx == NULL) {
/* Uncommon case and should be a static check. */
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
&alloc_ctx);
emap_alloc_ctx_lookup(
tsdn, &arena_emap_global, ptr, &alloc_ctx);
assert(alloc_ctx.szind == sz_size2index(size));
assert(emap_alloc_ctx_usize_get(&alloc_ctx) == size);
} else {
@ -498,14 +502,14 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
}
if (config_debug) {
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
ptr);
edata_t *edata = emap_edata_lookup(
tsdn, &arena_emap_global, ptr);
assert(alloc_ctx.szind == edata_szind_get(edata));
assert(alloc_ctx.slab == edata_slab_get(edata));
emap_alloc_ctx_init(&alloc_ctx, alloc_ctx.szind, alloc_ctx.slab,
sz_s2u(size));
assert(emap_alloc_ctx_usize_get(&alloc_ctx) ==
edata_usize_get(edata));
emap_alloc_ctx_init(
&alloc_ctx, alloc_ctx.szind, alloc_ctx.slab, sz_s2u(size));
assert(emap_alloc_ctx_usize_get(&alloc_ctx)
== edata_usize_get(edata));
}
if (likely(alloc_ctx.slab)) {
@ -513,8 +517,8 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
if (arena_tcache_dalloc_small_safety_check(tsdn, ptr)) {
return;
}
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
alloc_ctx.szind, slow_path);
tcache_dalloc_small(
tsdn_tsd(tsdn), tcache, ptr, alloc_ctx.szind, slow_path);
} else {
arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
sz_s2u(size), slow_path);
@ -522,13 +526,13 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
}
static inline void
arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t alignment) {
arena_cache_oblivious_randomize(
tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t alignment) {
assert(edata_base_get(edata) == edata_addr_get(edata));
if (alignment < PAGE) {
unsigned lg_range = LG_PAGE -
lg_floor(CACHELINE_CEILING(alignment));
unsigned lg_range = LG_PAGE
- lg_floor(CACHELINE_CEILING(alignment));
size_t r;
if (!tsdn_null(tsdn)) {
tsd_t *tsd = tsdn_tsd(tsdn);
@ -538,12 +542,12 @@ arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
uint64_t stack_value = (uint64_t)(uintptr_t)&r;
r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
}
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
lg_range);
edata->e_addr = (void *)((byte_t *)edata->e_addr +
random_offset);
assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
edata->e_addr);
uintptr_t random_offset = ((uintptr_t)r)
<< (LG_PAGE - lg_range);
edata->e_addr = (void *)((byte_t *)edata->e_addr
+ random_offset);
assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment)
== edata->e_addr);
}
}
@ -556,20 +560,21 @@ arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t;
struct arena_dalloc_bin_locked_info_s {
div_info_t div_info;
uint32_t nregs;
uint64_t ndalloc;
uint32_t nregs;
uint64_t ndalloc;
};
JEMALLOC_ALWAYS_INLINE size_t
arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind,
edata_t *slab, const void *ptr) {
size_t regind = arena_slab_regind_impl(&info->div_info, binind, slab, ptr);
size_t regind = arena_slab_regind_impl(
&info->div_info, binind, slab, ptr);
return regind;
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info,
szind_t binind) {
arena_dalloc_bin_locked_begin(
arena_dalloc_bin_locked_info_t *info, szind_t binind) {
info->div_info = arena_binind_div_info[binind];
info->nregs = bin_infos[binind].nregs;
info->ndalloc = 0;
@ -589,8 +594,8 @@ arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
void *ptr, edata_t **dalloc_slabs, unsigned ndalloc_slabs,
unsigned *dalloc_slabs_count, edata_list_active_t *dalloc_slabs_extra) {
const bin_info_t *bin_info = &bin_infos[binind];
size_t regind = arena_slab_regind(info, binind, slab, ptr);
slab_data_t *slab_data = edata_slab_data_get(slab);
size_t regind = arena_slab_regind(info, binind, slab, ptr);
slab_data_t *slab_data = edata_slab_data_get(slab);
assert(edata_nfree_get(slab) < bin_info->nregs);
/* Freeing an unallocated pointer can cause assertion failure. */
@ -605,8 +610,8 @@ arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
unsigned nfree = edata_nfree_get(slab);
if (nfree == bin_info->nregs) {
arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab,
bin);
arena_dalloc_bin_locked_handle_newly_empty(
tsdn, arena, slab, bin);
if (*dalloc_slabs_count < ndalloc_slabs) {
dalloc_slabs[*dalloc_slabs_count] = slab;
@ -615,8 +620,8 @@ arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
edata_list_active_append(dalloc_slabs_extra, slab);
}
} else if (nfree == 1 && slab != bin->slabcur) {
arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena, slab,
bin);
arena_dalloc_bin_locked_handle_newly_nonempty(
tsdn, arena, slab, bin);
}
}
@ -637,21 +642,20 @@ arena_bin_flush_batch_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
edata_list_active_t *dalloc_slabs_extra) {
assert(binind < bin_info_nbatched_sizes);
bin_with_batch_t *batched_bin = (bin_with_batch_t *)bin;
size_t nelems_to_pop = batcher_pop_begin(tsdn,
&batched_bin->remote_frees);
size_t nelems_to_pop = batcher_pop_begin(
tsdn, &batched_bin->remote_frees);
bin_batching_test_mid_pop(nelems_to_pop);
if (nelems_to_pop == BATCHER_NO_IDX) {
malloc_mutex_assert_not_owner(tsdn,
&batched_bin->remote_frees.mtx);
malloc_mutex_assert_not_owner(
tsdn, &batched_bin->remote_frees.mtx);
return;
} else {
malloc_mutex_assert_owner(tsdn,
&batched_bin->remote_frees.mtx);
malloc_mutex_assert_owner(tsdn, &batched_bin->remote_frees.mtx);
}
size_t npushes = batcher_pop_get_pushes(tsdn,
&batched_bin->remote_frees);
size_t npushes = batcher_pop_get_pushes(
tsdn, &batched_bin->remote_frees);
bin_remote_free_data_t remote_free_data[BIN_REMOTE_FREE_ELEMS_MAX];
for (size_t i = 0; i < nelems_to_pop; i++) {
remote_free_data[i] = batched_bin->remote_free_data[i];
@ -682,8 +686,8 @@ struct arena_bin_flush_batch_state_s {
* backup array for any "extra" slabs, as well as a a list to allow a
* dynamic number of ones exceeding that array.
*/
edata_t *dalloc_slabs[8];
unsigned dalloc_slab_count;
edata_t *dalloc_slabs[8];
unsigned dalloc_slab_count;
edata_list_active_t dalloc_slabs_extra;
};
@ -712,8 +716,8 @@ arena_bin_flush_batch_after_lock(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
preallocated_slabs);
arena_bin_flush_batch_impl(tsdn, arena, bin, &state->info, binind,
state->dalloc_slabs, ndalloc_slabs,
&state->dalloc_slab_count, &state->dalloc_slabs_extra);
state->dalloc_slabs, ndalloc_slabs, &state->dalloc_slab_count,
&state->dalloc_slabs_extra);
}
JEMALLOC_ALWAYS_INLINE void
@ -769,8 +773,8 @@ arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
ret = shard0 + binshard;
}
assert(binind >= SC_NBINS - 1
|| (uintptr_t)ret < (uintptr_t)arena
+ arena_bin_offsets[binind + 1]);
|| (uintptr_t)ret
< (uintptr_t)arena + arena_bin_offsets[binind + 1]);
return ret;
}

View file

@ -17,31 +17,31 @@ struct arena_stats_large_s {
* Total number of large allocation/deallocation requests served directly
* by the arena.
*/
locked_u64_t nmalloc;
locked_u64_t ndalloc;
locked_u64_t nmalloc;
locked_u64_t ndalloc;
/*
* Total large active bytes (allocated - deallocated) served directly
* by the arena.
*/
locked_u64_t active_bytes;
locked_u64_t active_bytes;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
locked_u64_t nrequests; /* Partially derived. */
locked_u64_t nrequests; /* Partially derived. */
/*
* Number of tcache fills / flushes for large (similarly, periodically
* merged). Note that there is no large tcache batch-fill currently
* (i.e. only fill 1 at a time); however flush may be batched.
*/
locked_u64_t nfills; /* Partially derived. */
locked_u64_t nflushes; /* Partially derived. */
locked_u64_t nfills; /* Partially derived. */
locked_u64_t nflushes; /* Partially derived. */
/* Current number of allocations of this size class. */
size_t curlextents; /* Derived. */
size_t curlextents; /* Derived. */
};
/*
@ -57,40 +57,40 @@ struct arena_stats_s {
* resident includes the base stats -- that's why it lives here and not
* in pa_shard_stats_t.
*/
size_t base; /* Derived. */
size_t metadata_edata; /* Derived. */
size_t metadata_rtree; /* Derived. */
size_t resident; /* Derived. */
size_t metadata_thp; /* Derived. */
size_t mapped; /* Derived. */
size_t base; /* Derived. */
size_t metadata_edata; /* Derived. */
size_t metadata_rtree; /* Derived. */
size_t resident; /* Derived. */
size_t metadata_thp; /* Derived. */
size_t mapped; /* Derived. */
atomic_zu_t internal;
atomic_zu_t internal;
size_t allocated_large; /* Derived. */
uint64_t nmalloc_large; /* Derived. */
uint64_t ndalloc_large; /* Derived. */
uint64_t nfills_large; /* Derived. */
uint64_t nflushes_large; /* Derived. */
uint64_t nrequests_large; /* Derived. */
size_t allocated_large; /* Derived. */
uint64_t nmalloc_large; /* Derived. */
uint64_t ndalloc_large; /* Derived. */
uint64_t nfills_large; /* Derived. */
uint64_t nflushes_large; /* Derived. */
uint64_t nrequests_large; /* Derived. */
/*
* The stats logically owned by the pa_shard in the same arena. This
* lives here only because it's convenient for the purposes of the ctl
* module -- it only knows about the single arena_stats.
*/
pa_shard_stats_t pa_shard_stats;
pa_shard_stats_t pa_shard_stats;
/* Number of bytes cached in tcache associated with this arena. */
size_t tcache_bytes; /* Derived. */
size_t tcache_stashed_bytes; /* Derived. */
size_t tcache_bytes; /* Derived. */
size_t tcache_stashed_bytes; /* Derived. */
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
/* One element for each large size class. */
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
/* Arena uptime. */
nstime_t uptime;
nstime_t uptime;
};
static inline bool
@ -101,7 +101,7 @@ arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
}
}
if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats",
WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
return true;
}
/* Memory is zeroed, so there is no need to clear stats. */
@ -115,8 +115,8 @@ arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
&lstats->nrequests, nrequests);
locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
&lstats->nflushes, 1);
locked_inc_u64(
tsdn, LOCKEDINT_MTX(arena_stats->mtx), &lstats->nflushes, 1);
LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
}

View file

@ -32,20 +32,20 @@ struct arena_s {
*
* Synchronization: atomic.
*/
atomic_u_t nthreads[2];
atomic_u_t nthreads[2];
/* Next bin shard for binding new threads. Synchronization: atomic. */
atomic_u_t binshard_next;
atomic_u_t binshard_next;
/*
* When percpu_arena is enabled, to amortize the cost of reading /
* updating the current CPU id, track the most recent thread accessing
* this arena, and only read CPU if there is a mismatch.
*/
tsdn_t *last_thd;
tsdn_t *last_thd;
/* Synchronization: internal. */
arena_stats_t stats;
arena_stats_t stats;
/*
* Lists of tcaches and cache_bin_array_descriptors for extant threads
@ -54,28 +54,28 @@ struct arena_s {
*
* Synchronization: tcache_ql_mtx.
*/
ql_head(tcache_slow_t) tcache_ql;
ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
malloc_mutex_t tcache_ql_mtx;
ql_head(tcache_slow_t) tcache_ql;
ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
malloc_mutex_t tcache_ql_mtx;
/*
* Represents a dss_prec_t, but atomically.
*
* Synchronization: atomic.
*/
atomic_u_t dss_prec;
atomic_u_t dss_prec;
/*
* Extant large allocations.
*
* Synchronization: large_mtx.
*/
edata_list_active_t large;
edata_list_active_t large;
/* Synchronizes all large allocation/update/deallocation. */
malloc_mutex_t large_mtx;
malloc_mutex_t large_mtx;
/* The page-level allocator shard this arena uses. */
pa_shard_t pa_shard;
pa_shard_t pa_shard;
/*
* A cached copy of base->ind. This can get accessed on hot paths;
@ -88,12 +88,12 @@ struct arena_s {
*
* Synchronization: internal.
*/
base_t *base;
base_t *base;
/* Used to determine uptime. Read-only after initialization. */
nstime_t create_time;
nstime_t create_time;
/* The name of the arena. */
char name[ARENA_NAME_LEN];
char name[ARENA_NAME_LEN];
/*
* The arena is allocated alongside its bins; really this is a
@ -101,10 +101,11 @@ struct arena_s {
* Enforcing cacheline-alignment to minimize the number of cachelines
* touched on the hot paths.
*/
JEMALLOC_WARN_ON_USAGE("Do not use this field directly. "
"Use `arena_get_bin` instead.")
JEMALLOC_WARN_ON_USAGE(
"Do not use this field directly. "
"Use `arena_get_bin` instead.")
JEMALLOC_ALIGNED(CACHELINE)
bin_with_batch_t all_bins[0];
bin_with_batch_t all_bins[0];
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */

View file

@ -5,38 +5,38 @@
#include "jemalloc/internal/sc.h"
/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT (0)
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT (0)
/* Number of event ticks between time checks. */
#define ARENA_DECAY_NTICKS_PER_UPDATE 1000
#define ARENA_DECAY_NTICKS_PER_UPDATE 1000
/* Maximum length of the arena name. */
#define ARENA_NAME_LEN 32
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_s arena_t;
typedef struct arena_s arena_t;
typedef enum {
percpu_arena_mode_names_base = 0, /* Used for options processing. */
percpu_arena_mode_names_base = 0, /* Used for options processing. */
/*
* *_uninit are used only during bootstrapping, and must correspond
* to initialized variant plus percpu_arena_mode_enabled_base.
*/
percpu_arena_uninit = 0,
per_phycpu_arena_uninit = 1,
percpu_arena_uninit = 0,
per_phycpu_arena_uninit = 1,
/* All non-disabled modes must come after percpu_arena_disabled. */
percpu_arena_disabled = 2,
percpu_arena_disabled = 2,
percpu_arena_mode_names_limit = 3, /* Used for options processing. */
percpu_arena_mode_names_limit = 3, /* Used for options processing. */
percpu_arena_mode_enabled_base = 3,
percpu_arena = 3,
per_phycpu_arena = 4 /* Hyper threads share arena. */
percpu_arena = 3,
per_phycpu_arena = 4 /* Hyper threads share arena. */
} percpu_arena_mode_t;
#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
#define PERCPU_ARENA_DEFAULT percpu_arena_disabled
#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
#define PERCPU_ARENA_DEFAULT percpu_arena_disabled
/*
* When allocation_size >= oversize_threshold, use the dedicated huge arena

View file

@ -7,51 +7,57 @@
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
# define assert(e) \
do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#endif
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
unreachable(); \
} while (0)
# define not_reached() \
do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
unreachable(); \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
# define not_implemented() \
do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) { \
not_implemented(); \
} \
} while (0)
# define assert_not_implemented(e) \
do { \
if (unlikely(config_debug && !(e))) { \
not_implemented(); \
} \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#ifndef cassert
#define cassert(c) do { \
if (unlikely(!(c))) { \
not_reached(); \
} \
} while (0)
# define cassert(c) \
do { \
if (unlikely(!(c))) { \
not_reached(); \
} \
} while (0)
#endif

View file

@ -5,21 +5,21 @@
#define JEMALLOC_U8_ATOMICS
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
# include "jemalloc/internal/atomic_gcc_atomic.h"
# if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS)
# undef JEMALLOC_U8_ATOMICS
# endif
# include "jemalloc/internal/atomic_gcc_atomic.h"
# if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS)
# undef JEMALLOC_U8_ATOMICS
# endif
#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
# include "jemalloc/internal/atomic_gcc_sync.h"
# if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS)
# undef JEMALLOC_U8_ATOMICS
# endif
# include "jemalloc/internal/atomic_gcc_sync.h"
# if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS)
# undef JEMALLOC_U8_ATOMICS
# endif
#elif defined(_MSC_VER)
# include "jemalloc/internal/atomic_msvc.h"
# include "jemalloc/internal/atomic_msvc.h"
#elif defined(JEMALLOC_C11_ATOMICS)
# include "jemalloc/internal/atomic_c11.h"
# include "jemalloc/internal/atomic_c11.h"
#else
# error "Don't have atomics implemented on this platform."
# error "Don't have atomics implemented on this platform."
#endif
#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
@ -56,22 +56,19 @@
/*
* Another convenience -- simple atomic helper functions.
*/
#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \
lg_size) \
JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
ATOMIC_INLINE void \
atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \
type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval + inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
} \
ATOMIC_INLINE void \
atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \
type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval - inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, lg_size) \
JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
ATOMIC_INLINE void atomic_load_add_store_##short_type( \
atomic_##short_type##_t *a, type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval + inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
} \
ATOMIC_INLINE void atomic_load_sub_store_##short_type( \
atomic_##short_type##_t *a, type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval - inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
}
/*
@ -79,7 +76,7 @@
* fact.
*/
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# define JEMALLOC_ATOMIC_U64
# define JEMALLOC_ATOMIC_U64
#endif
JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)

View file

@ -66,35 +66,29 @@ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
* Integral types have some special operations available that non-integral ones
* lack.
*/
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
/* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_add_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_sub_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_and_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_or_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_xor_explicit(a, val, mo); \
}
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, /* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type atomic_fetch_add_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return atomic_fetch_add_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE type atomic_fetch_sub_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return atomic_fetch_sub_explicit(a, val, mo); \
} \
ATOMIC_INLINE type atomic_fetch_and_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return atomic_fetch_and_explicit(a, val, mo); \
} \
ATOMIC_INLINE type atomic_fetch_or_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return atomic_fetch_or_explicit(a, val, mo); \
} \
ATOMIC_INLINE type atomic_fetch_xor_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return atomic_fetch_xor_explicit(a, val, mo); \
}
#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */

View file

@ -6,7 +6,8 @@
#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
#define ATOMIC_INIT(...) {__VA_ARGS__}
#define ATOMIC_INIT(...) \
{ __VA_ARGS__ }
typedef enum {
atomic_memory_order_relaxed,
@ -39,95 +40,81 @@ atomic_fence(atomic_memory_order_t mo) {
__atomic_thread_fence(atomic_enum_to_builtin(mo));
}
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
/* unused */ lg_size) \
typedef struct { \
type repr; \
} atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
atomic_memory_order_t mo) { \
type result; \
__atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
return result; \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
__atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
type result; \
__atomic_exchange(&a->repr, &val, &result, \
atomic_enum_to_builtin(mo)); \
return result; \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
UNUSED type *expected, type desired, \
atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return __atomic_compare_exchange(&a->repr, expected, &desired, \
true, atomic_enum_to_builtin(success_mo), \
atomic_enum_to_builtin(failure_mo)); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
UNUSED type *expected, type desired, \
atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return __atomic_compare_exchange(&a->repr, expected, &desired, \
false, \
atomic_enum_to_builtin(success_mo), \
atomic_enum_to_builtin(failure_mo)); \
}
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
typedef struct { \
type repr; \
} atomic_##short_type##_t; \
\
ATOMIC_INLINE type atomic_load_##short_type( \
const atomic_##short_type##_t *a, atomic_memory_order_t mo) { \
type result; \
__atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
return result; \
} \
\
ATOMIC_INLINE void atomic_store_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
__atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type atomic_exchange_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
type result; \
__atomic_exchange( \
&a->repr, &val, &result, atomic_enum_to_builtin(mo)); \
return result; \
} \
\
ATOMIC_INLINE bool atomic_compare_exchange_weak_##short_type( \
atomic_##short_type##_t *a, UNUSED type *expected, type desired, \
atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return __atomic_compare_exchange(&a->repr, expected, &desired, \
true, atomic_enum_to_builtin(success_mo), \
atomic_enum_to_builtin(failure_mo)); \
} \
\
ATOMIC_INLINE bool atomic_compare_exchange_strong_##short_type( \
atomic_##short_type##_t *a, UNUSED type *expected, type desired, \
atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return __atomic_compare_exchange(&a->repr, expected, &desired, \
false, atomic_enum_to_builtin(success_mo), \
atomic_enum_to_builtin(failure_mo)); \
}
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
/* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_add(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_sub(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_and(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_or(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_xor(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
}
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, /* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type atomic_fetch_add_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return __atomic_fetch_add( \
&a->repr, val, atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type atomic_fetch_sub_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return __atomic_fetch_sub( \
&a->repr, val, atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type atomic_fetch_and_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return __atomic_fetch_and( \
&a->repr, val, atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type atomic_fetch_or_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return __atomic_fetch_or( \
&a->repr, val, atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type atomic_fetch_xor_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return __atomic_fetch_xor( \
&a->repr, val, atomic_enum_to_builtin(mo)); \
}
#undef ATOMIC_INLINE

View file

@ -5,7 +5,8 @@
#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
#define ATOMIC_INIT(...) {__VA_ARGS__}
#define ATOMIC_INIT(...) \
{ __VA_ARGS__ }
typedef enum {
atomic_memory_order_relaxed,
@ -29,13 +30,13 @@ atomic_fence(atomic_memory_order_t mo) {
return;
}
asm volatile("" ::: "memory");
# if defined(__i386__) || defined(__x86_64__)
#if defined(__i386__) || defined(__x86_64__)
/* This is implicit on x86. */
# elif defined(__ppc64__)
#elif defined(__ppc64__)
asm volatile("lwsync");
# elif defined(__ppc__)
#elif defined(__ppc__)
asm volatile("sync");
# elif defined(__sparc__) && defined(__arch64__)
#elif defined(__sparc__) && defined(__arch64__)
if (mo == atomic_memory_order_acquire) {
asm volatile("membar #LoadLoad | #LoadStore");
} else if (mo == atomic_memory_order_release) {
@ -43,9 +44,9 @@ atomic_fence(atomic_memory_order_t mo) {
} else {
asm volatile("membar #LoadLoad | #LoadStore | #StoreStore");
}
# else
#else
__sync_synchronize();
# endif
#endif
asm volatile("" ::: "memory");
}
@ -68,23 +69,22 @@ atomic_fence(atomic_memory_order_t mo) {
ATOMIC_INLINE void
atomic_pre_sc_load_fence() {
# if defined(__i386__) || defined(__x86_64__) || \
(defined(__sparc__) && defined(__arch64__))
#if defined(__i386__) || defined(__x86_64__) \
|| (defined(__sparc__) && defined(__arch64__))
atomic_fence(atomic_memory_order_relaxed);
# else
#else
atomic_fence(atomic_memory_order_seq_cst);
# endif
#endif
}
ATOMIC_INLINE void
atomic_post_sc_store_fence() {
# if defined(__i386__) || defined(__x86_64__) || \
(defined(__sparc__) && defined(__arch64__))
#if defined(__i386__) || defined(__x86_64__) \
|| (defined(__sparc__) && defined(__arch64__))
atomic_fence(atomic_memory_order_seq_cst);
# else
#else
atomic_fence(atomic_memory_order_relaxed);
# endif
#endif
}
/* clang-format off */
@ -164,39 +164,33 @@ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
}
/* clang-format on */
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
/* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_add(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_sub(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_and(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_or(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_xor(&a->repr, val); \
}
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, /* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type atomic_fetch_add_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return __sync_fetch_and_add(&a->repr, val); \
} \
\
ATOMIC_INLINE type atomic_fetch_sub_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return __sync_fetch_and_sub(&a->repr, val); \
} \
\
ATOMIC_INLINE type atomic_fetch_and_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return __sync_fetch_and_and(&a->repr, val); \
} \
\
ATOMIC_INLINE type atomic_fetch_or_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return __sync_fetch_and_or(&a->repr, val); \
} \
\
ATOMIC_INLINE type atomic_fetch_xor_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return __sync_fetch_and_xor(&a->repr, val); \
}
#undef ATOMIC_INLINE

View file

@ -5,7 +5,8 @@
#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
#define ATOMIC_INIT(...) {__VA_ARGS__}
#define ATOMIC_INIT(...) \
{ __VA_ARGS__ }
typedef enum {
atomic_memory_order_relaxed,
@ -15,108 +16,104 @@ typedef enum {
atomic_memory_order_seq_cst
} atomic_memory_order_t;
typedef char atomic_repr_0_t;
typedef short atomic_repr_1_t;
typedef long atomic_repr_2_t;
typedef char atomic_repr_0_t;
typedef short atomic_repr_1_t;
typedef long atomic_repr_2_t;
typedef __int64 atomic_repr_3_t;
ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
_ReadWriteBarrier();
# if defined(_M_ARM) || defined(_M_ARM64)
#if defined(_M_ARM) || defined(_M_ARM64)
/* ARM needs a barrier for everything but relaxed. */
if (mo != atomic_memory_order_relaxed) {
MemoryBarrier();
}
# elif defined(_M_IX86) || defined (_M_X64)
#elif defined(_M_IX86) || defined(_M_X64)
/* x86 needs a barrier only for seq_cst. */
if (mo == atomic_memory_order_seq_cst) {
MemoryBarrier();
}
# else
# error "Don't know how to create atomics for this platform for MSVC."
# endif
#else
# error "Don't know how to create atomics for this platform for MSVC."
#endif
_ReadWriteBarrier();
}
#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t
#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_##lg_size##_t
#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
#define ATOMIC_RAW_CONCAT(a, b) a ## b
#define ATOMIC_RAW_CONCAT(a, b) a##b
#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \
base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) \
ATOMIC_CONCAT(base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
#define ATOMIC_INTERLOCKED_SUFFIX_0 8
#define ATOMIC_INTERLOCKED_SUFFIX_1 16
#define ATOMIC_INTERLOCKED_SUFFIX_2
#define ATOMIC_INTERLOCKED_SUFFIX_3 64
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
typedef struct { \
ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
} atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
atomic_memory_order_t mo) { \
ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_acquire); \
} \
return (type) ret; \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_release); \
} \
a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \
if (mo == atomic_memory_order_seq_cst) { \
atomic_fence(atomic_memory_order_seq_cst); \
} \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
ATOMIC_INTERLOCKED_REPR(lg_size) e = \
(ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \
ATOMIC_INTERLOCKED_REPR(lg_size) d = \
(ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
ATOMIC_INTERLOCKED_REPR(lg_size) old = \
ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \
lg_size)(&a->repr, d, e); \
if (old == e) { \
return true; \
} else { \
*expected = (type)old; \
return false; \
} \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
/* We implement the weak version with strong semantics. */ \
return atomic_compare_exchange_weak_##short_type(a, expected, \
desired, success_mo, failure_mo); \
}
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
typedef struct { \
ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
} atomic_##short_type##_t; \
\
ATOMIC_INLINE type atomic_load_##short_type( \
const atomic_##short_type##_t *a, atomic_memory_order_t mo) { \
ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_acquire); \
} \
return (type)ret; \
} \
\
ATOMIC_INLINE void atomic_store_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_release); \
} \
a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size))val; \
if (mo == atomic_memory_order_seq_cst) { \
atomic_fence(atomic_memory_order_seq_cst); \
} \
} \
\
ATOMIC_INLINE type atomic_exchange_##short_type( \
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
\
ATOMIC_INLINE bool atomic_compare_exchange_weak_##short_type( \
atomic_##short_type##_t *a, type *expected, type desired, \
atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
ATOMIC_INTERLOCKED_REPR(lg_size) \
e = (ATOMIC_INTERLOCKED_REPR(lg_size)) * expected; \
ATOMIC_INTERLOCKED_REPR(lg_size) \
d = (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
ATOMIC_INTERLOCKED_REPR(lg_size) \
old = ATOMIC_INTERLOCKED_NAME( \
_InterlockedCompareExchange, lg_size)(&a->repr, d, e); \
if (old == e) { \
return true; \
} else { \
*expected = (type)old; \
return false; \
} \
} \
\
ATOMIC_INLINE bool atomic_compare_exchange_strong_##short_type( \
atomic_##short_type##_t *a, type *expected, type desired, \
atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
/* We implement the weak version with strong semantics. */ \
return atomic_compare_exchange_weak_##short_type( \
a, expected, desired, success_mo, failure_mo); \
}
/* clang-format off */
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \

View file

@ -6,26 +6,26 @@
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/mutex.h"
extern bool opt_background_thread;
extern size_t opt_max_background_threads;
extern malloc_mutex_t background_thread_lock;
extern atomic_b_t background_thread_enabled_state;
extern size_t n_background_threads;
extern size_t max_background_threads;
extern bool opt_background_thread;
extern size_t opt_max_background_threads;
extern malloc_mutex_t background_thread_lock;
extern atomic_b_t background_thread_enabled_state;
extern size_t n_background_threads;
extern size_t max_background_threads;
extern background_thread_info_t *background_thread_info;
bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
bool background_thread_is_started(background_thread_info_t* info);
void background_thread_wakeup_early(background_thread_info_t *info,
nstime_t *remaining_sleep);
bool background_thread_is_started(background_thread_info_t *info);
void background_thread_wakeup_early(
background_thread_info_t *info, nstime_t *remaining_sleep);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
void background_thread_postfork_child(tsdn_t *tsdn);
bool background_thread_stats_read(tsdn_t *tsdn,
background_thread_stats_t *stats);
bool background_thread_stats_read(
tsdn_t *tsdn, background_thread_stats_t *stats);
void background_thread_ctl_init(tsdn_t *tsdn);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER

View file

@ -36,14 +36,14 @@ background_thread_info_get(size_t ind) {
JEMALLOC_ALWAYS_INLINE uint64_t
background_thread_wakeup_time_get(background_thread_info_t *info) {
uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
(next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE)
== (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
return next_wakeup;
}
JEMALLOC_ALWAYS_INLINE void
background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
uint64_t wakeup_time) {
background_thread_wakeup_time_set(
tsdn_t *tsdn, background_thread_info_t *info, uint64_t wakeup_time) {
malloc_mutex_assert_owner(tsdn, &info->mtx);
atomic_store_b(&info->indefinite_sleep,
wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);

View file

@ -7,7 +7,7 @@
/* This file really combines "structs" and "types", but only transitionally. */
#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
# define JEMALLOC_PTHREAD_CREATE_WRAPPER
# define JEMALLOC_PTHREAD_CREATE_WRAPPER
#endif
#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
@ -35,33 +35,33 @@ typedef enum {
struct background_thread_info_s {
#ifdef JEMALLOC_BACKGROUND_THREAD
/* Background thread is pthread specific. */
pthread_t thread;
pthread_cond_t cond;
pthread_t thread;
pthread_cond_t cond;
#endif
malloc_mutex_t mtx;
background_thread_state_t state;
malloc_mutex_t mtx;
background_thread_state_t state;
/* When true, it means no wakeup scheduled. */
atomic_b_t indefinite_sleep;
atomic_b_t indefinite_sleep;
/* Next scheduled wakeup time (absolute time in ns). */
nstime_t next_wakeup;
nstime_t next_wakeup;
/*
* Since the last background thread run, newly added number of pages
* that need to be purged by the next wakeup. This is adjusted on
* epoch advance, and is used to determine whether we should signal the
* background thread to wake up earlier.
*/
size_t npages_to_purge_new;
size_t npages_to_purge_new;
/* Stats: total number of runs since started. */
uint64_t tot_n_runs;
uint64_t tot_n_runs;
/* Stats: total sleep time since started. */
nstime_t tot_sleep_time;
nstime_t tot_sleep_time;
};
typedef struct background_thread_info_s background_thread_info_t;
struct background_thread_stats_s {
size_t num_threads;
uint64_t num_runs;
nstime_t run_interval;
size_t num_threads;
uint64_t num_runs;
nstime_t run_interval;
mutex_prof_data_t max_counter_per_bg_thd;
};
typedef struct background_thread_stats_s background_thread_stats_t;

View file

@ -13,7 +13,7 @@
#define BASE_BLOCK_MIN_ALIGN ((size_t)2 << 20)
enum metadata_thp_mode_e {
metadata_thp_disabled = 0,
metadata_thp_disabled = 0,
/*
* Lazily enable hugepage for metadata. To avoid high RSS caused by THP
* + low usage arena (i.e. THP becomes a significant percentage), the
@ -22,15 +22,15 @@ enum metadata_thp_mode_e {
* arena), "auto" behaves the same as "always", i.e. madvise hugepage
* right away.
*/
metadata_thp_auto = 1,
metadata_thp_always = 2,
metadata_thp_auto = 1,
metadata_thp_always = 2,
metadata_thp_mode_limit = 3
};
typedef enum metadata_thp_mode_e metadata_thp_mode_t;
#define METADATA_THP_DEFAULT metadata_thp_disabled
extern metadata_thp_mode_t opt_metadata_thp;
extern const char *const metadata_thp_mode_names[];
extern const char *const metadata_thp_mode_names[];
/* Embedded at the beginning of every block of base-managed virtual memory. */
typedef struct base_block_s base_block_t;
@ -102,24 +102,24 @@ metadata_thp_enabled(void) {
}
base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind,
const extent_hooks_t *extent_hooks, bool metadata_use_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
ehooks_t *base_ehooks_get(base_t *base);
ehooks_t *base_ehooks_get_for_metadata(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
base_t *base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
bool metadata_use_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
ehooks_t *base_ehooks_get(base_t *base);
ehooks_t *base_ehooks_get_for_metadata(base_t *base);
extent_hooks_t *base_extent_hooks_set(
base_t *base, extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
void *base_alloc_rtree(tsdn_t *tsdn, base_t *base, size_t size);
void *b0_alloc_tcache_stack(tsdn_t *tsdn, size_t size);
void b0_dalloc_tcache_stack(tsdn_t *tsdn, void *tcache_stack);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *edata_allocated, size_t *rtree_allocated, size_t *resident,
size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
void *base_alloc_rtree(tsdn_t *tsdn, base_t *base, size_t size);
void *b0_alloc_tcache_stack(tsdn_t *tsdn, size_t size);
void b0_dalloc_tcache_stack(tsdn_t *tsdn, void *tcache_stack);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *edata_allocated, size_t *rtree_allocated, size_t *resident,
size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BASE_H */

View file

@ -5,7 +5,7 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#define BATCHER_NO_IDX ((size_t)-1)
#define BATCHER_NO_IDX ((size_t) - 1)
typedef struct batcher_s batcher_t;
struct batcher_s {
@ -14,9 +14,9 @@ struct batcher_s {
* togehter, along with the front of the mutex. The end of the mutex is
* only touched if there's contention.
*/
atomic_zu_t nelems;
size_t nelems_max;
size_t npushes;
atomic_zu_t nelems;
size_t nelems_max;
size_t npushes;
malloc_mutex_t mtx;
};
@ -27,8 +27,8 @@ void batcher_init(batcher_t *batcher, size_t nelems_max);
* BATCHER_NO_IDX if no index is free. If the former, the caller must call
* batcher_push_end once done.
*/
size_t batcher_push_begin(tsdn_t *tsdn, batcher_t *batcher,
size_t elems_to_push);
size_t batcher_push_begin(
tsdn_t *tsdn, batcher_t *batcher, size_t elems_to_push);
void batcher_push_end(tsdn_t *tsdn, batcher_t *batcher);
/*
@ -37,7 +37,7 @@ void batcher_push_end(tsdn_t *tsdn, batcher_t *batcher);
*/
size_t batcher_pop_begin(tsdn_t *tsdn, batcher_t *batcher);
size_t batcher_pop_get_pushes(tsdn_t *tsdn, batcher_t *batcher);
void batcher_pop_end(tsdn_t *tsdn, batcher_t *batcher);
void batcher_pop_end(tsdn_t *tsdn, batcher_t *batcher);
void batcher_prefork(tsdn_t *tsdn, batcher_t *batcher);
void batcher_postfork_parent(tsdn_t *tsdn, batcher_t *batcher);

View file

@ -14,8 +14,8 @@
#ifdef JEMALLOC_JET
extern void (*bin_batching_test_after_push_hook)(size_t idx);
extern void (*bin_batching_test_mid_pop_hook)(size_t elems_to_pop);
extern void (*bin_batching_test_after_unlock_hook)(unsigned slab_dalloc_count,
bool list_empty);
extern void (*bin_batching_test_after_unlock_hook)(
unsigned slab_dalloc_count, bool list_empty);
#endif
#ifdef JEMALLOC_JET
@ -50,8 +50,8 @@ bin_batching_test_after_unlock(unsigned slab_dalloc_count, bool list_empty) {
(void)list_empty;
#ifdef JEMALLOC_JET
if (bin_batching_test_after_unlock_hook != NULL) {
bin_batching_test_after_unlock_hook(slab_dalloc_count,
list_empty);
bin_batching_test_after_unlock_hook(
slab_dalloc_count, list_empty);
}
#endif
}
@ -63,13 +63,13 @@ bin_batching_test_after_unlock(unsigned slab_dalloc_count, bool list_empty) {
typedef struct bin_s bin_t;
struct bin_s {
/* All operations on bin_t fields require lock ownership. */
malloc_mutex_t lock;
malloc_mutex_t lock;
/*
* Bin statistics. These get touched every time the lock is acquired,
* so put them close by in the hopes of getting some cache locality.
*/
bin_stats_t stats;
bin_stats_t stats;
/*
* Current slab being used to service allocations of this bin's size
@ -77,29 +77,29 @@ struct bin_s {
* slabcur is reassigned, the previous slab must be deallocated or
* inserted into slabs_{nonfull,full}.
*/
edata_t *slabcur;
edata_t *slabcur;
/*
* Heap of non-full slabs. This heap is used to assure that new
* allocations come from the non-full slab that is oldest/lowest in
* memory.
*/
edata_heap_t slabs_nonfull;
edata_heap_t slabs_nonfull;
/* List used to track full slabs. */
edata_list_active_t slabs_full;
edata_list_active_t slabs_full;
};
typedef struct bin_remote_free_data_s bin_remote_free_data_t;
struct bin_remote_free_data_s {
void *ptr;
void *ptr;
edata_t *slab;
};
typedef struct bin_with_batch_s bin_with_batch_t;
struct bin_with_batch_s {
bin_t bin;
batcher_t remote_frees;
bin_t bin;
batcher_t remote_frees;
bin_remote_free_data_t remote_free_data[BIN_REMOTE_FREE_ELEMS_MAX];
};

View file

@ -26,22 +26,22 @@
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
/* Size of regions in a slab for this bin's size class. */
size_t reg_size;
size_t reg_size;
/* Total size of a slab for this bin's size class. */
size_t slab_size;
size_t slab_size;
/* Total number of regions in a slab for this bin's size class. */
uint32_t nregs;
uint32_t nregs;
/* Number of sharded bins in each arena for this size class. */
uint32_t n_shards;
uint32_t n_shards;
/*
* Metadata used to manipulate bitmaps for slabs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
bitmap_info_t bitmap_info;
};
/* The maximum size a size class can be and still get batching behavior. */
@ -51,7 +51,7 @@ extern size_t opt_bin_info_remote_free_max_batch;
// The max number of pending elems (across all batches)
extern size_t opt_bin_info_remote_free_max;
extern szind_t bin_info_nbatched_sizes;
extern szind_t bin_info_nbatched_sizes;
extern unsigned bin_info_nbatched_bins;
extern unsigned bin_info_nunbatched_bins;

View file

@ -12,52 +12,52 @@ struct bin_stats_s {
* many times, resulting many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t nmalloc;
uint64_t ndalloc;
uint64_t nmalloc;
uint64_t ndalloc;
/*
* Number of allocation requests that correspond to the size of this
* bin. This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t nrequests;
uint64_t nrequests;
/*
* Current number of regions of this size class, including regions
* currently cached by tcache.
*/
size_t curregs;
size_t curregs;
/* Number of tcache fills from this bin. */
uint64_t nfills;
uint64_t nfills;
/* Number of tcache flushes to this bin. */
uint64_t nflushes;
uint64_t nflushes;
/* Total number of slabs created for this bin's size class. */
uint64_t nslabs;
uint64_t nslabs;
/*
* Total number of slabs reused by extracting them from the slabs heap
* for this bin's size class.
*/
uint64_t reslabs;
uint64_t reslabs;
/* Current number of slabs in this bin. */
size_t curslabs;
size_t curslabs;
/* Current size of nonfull slabs heap in this bin. */
size_t nonfull_slabs;
size_t nonfull_slabs;
uint64_t batch_pops;
uint64_t batch_failed_pushes;
uint64_t batch_pushes;
uint64_t batch_pushed_elems;
uint64_t batch_pops;
uint64_t batch_failed_pushes;
uint64_t batch_pushes;
uint64_t batch_pushed_elems;
};
typedef struct bin_stats_data_s bin_stats_data_t;
struct bin_stats_data_s {
bin_stats_t stats_data;
bin_stats_t stats_data;
mutex_prof_data_t mutex_data;
};
#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */

View file

@ -8,7 +8,10 @@
#define N_BIN_SHARDS_DEFAULT 1
/* Used in TSD static initializer only. Real init in arena_bind(). */
#define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}}
#define TSD_BINSHARDS_ZERO_INITIALIZER \
{ \
{ UINT8_MAX } \
}
typedef struct tsd_binshards_s tsd_binshards_t;
struct tsd_binshards_s {

View file

@ -5,9 +5,9 @@
#include "jemalloc/internal/assert.h"
/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
|| !defined(JEMALLOC_INTERNAL_FFS)
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif
/*
@ -110,15 +110,17 @@ fls_u(unsigned x) {
}
#elif defined(_MSC_VER)
#if LG_SIZEOF_PTR == 3
#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x)
#else
# if LG_SIZEOF_PTR == 3
# define DO_BSR64(bit, x) _BitScanReverse64(&bit, x)
# else
/*
* This never actually runs; we're just dodging a compiler error for the
* never-taken branch where sizeof(void *) == 8.
*/
#define DO_BSR64(bit, x) bit = 0; unreachable()
#endif
# define DO_BSR64(bit, x) \
bit = 0; \
unreachable()
# endif
/* clang-format off */
#define DO_FLS(x) do { \
@ -164,8 +166,8 @@ fls_u(unsigned x) {
DO_FLS(x);
}
#undef DO_FLS
#undef DO_BSR64
# undef DO_FLS
# undef DO_BSR64
#else
static inline unsigned
@ -185,7 +187,7 @@ fls_u(unsigned x) {
#endif
#if LG_SIZEOF_LONG_LONG > 3
# error "Haven't implemented popcount for 16-byte ints."
# error "Haven't implemented popcount for 16-byte ints."
#endif
/* clang-format off */
@ -284,7 +286,7 @@ popcount_llu(unsigned long long bitmap) {
*/
static inline size_t
cfs_lu(unsigned long* bitmap) {
cfs_lu(unsigned long *bitmap) {
util_assume(*bitmap != 0);
size_t bit = ffs_lu(*bitmap);
*bitmap ^= ZU(1) << bit;
@ -300,7 +302,7 @@ ffs_zu(size_t x) {
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return ffs_llu(x);
#else
#error No implementation for size_t ffs()
# error No implementation for size_t ffs()
#endif
}
@ -313,11 +315,10 @@ fls_zu(size_t x) {
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return fls_llu(x);
#else
#error No implementation for size_t fls()
# error No implementation for size_t fls()
#endif
}
static inline unsigned
ffs_u64(uint64_t x) {
#if LG_SIZEOF_LONG == 3
@ -325,7 +326,7 @@ ffs_u64(uint64_t x) {
#elif LG_SIZEOF_LONG_LONG == 3
return ffs_llu(x);
#else
#error No implementation for 64-bit ffs()
# error No implementation for 64-bit ffs()
#endif
}
@ -336,7 +337,7 @@ fls_u64(uint64_t x) {
#elif LG_SIZEOF_LONG_LONG == 3
return fls_llu(x);
#else
#error No implementation for 64-bit fls()
# error No implementation for 64-bit fls()
#endif
}
@ -345,7 +346,7 @@ ffs_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
return ffs_u(x);
#else
#error No implementation for 32-bit ffs()
# error No implementation for 32-bit ffs()
#endif
}
@ -354,7 +355,7 @@ fls_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
return fls_u(x);
#else
#error No implementation for 32-bit fls()
# error No implementation for 32-bit fls()
#endif
}
@ -375,7 +376,7 @@ pow2_ceil_u64(uint64_t x) {
static inline uint32_t
pow2_ceil_u32(uint32_t x) {
if (unlikely(x <= 1)) {
return x;
return x;
}
size_t msb_on_index = fls_u32(x - 1);
/* As above. */
@ -413,13 +414,16 @@ lg_ceil(size_t x) {
#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
#define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2))
#define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4))
#define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8))
#define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16))
#define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32))
#define LG_FLOOR_16(x) \
(x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8))
#define LG_FLOOR_32(x) \
(x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16))
#define LG_FLOOR_64(x) \
(x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32))
#if LG_SIZEOF_PTR == 2
# define LG_FLOOR(x) LG_FLOOR_32((x))
# define LG_FLOOR(x) LG_FLOOR_32((x))
#else
# define LG_FLOOR(x) LG_FLOOR_64((x))
# define LG_FLOOR(x) LG_FLOOR_64((x))
#endif
#define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1))

View file

@ -6,22 +6,22 @@
#include "jemalloc/internal/sc.h"
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
/* Maximum bitmap bit count is determined by maximum regions per slab. */
# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS
# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS
#else
/* Maximum bitmap bit count is determined by number of extent size classes. */
# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
#endif
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS - 1)
/*
* Do some analysis on how big the bitmap is before we use a tree. For a brute
@ -29,67 +29,64 @@ typedef unsigned long bitmap_t;
* use a tree instead.
*/
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define BITMAP_USE_TREE
# define BITMAP_USE_TREE
#endif
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
(((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
#define BITMAP_BITS2GROUPS(nbits) \
(((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
/*
* Number of groups required at a particular level for a given number of bits.
*/
#define BITMAP_GROUPS_L0(nbits) \
BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS((nbits)))))
#define BITMAP_GROUPS_L4(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
#define BITMAP_GROUPS_L0(nbits) BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))))
#define BITMAP_GROUPS_L4(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
/*
* Assuming the number of levels, number of groups required for a given number
* of bits.
*/
#define BITMAP_GROUPS_1_LEVEL(nbits) \
BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
#define BITMAP_GROUPS_5_LEVEL(nbits) \
(BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
#define BITMAP_GROUPS_1_LEVEL(nbits) BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
#define BITMAP_GROUPS_5_LEVEL(nbits) \
(BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
#ifdef BITMAP_USE_TREE
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
#else
# error "Unsupported bitmap size"
#endif
# if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
# elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
# elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
# elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
# elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
# else
# error "Unsupported bitmap size"
# endif
/*
* Maximum number of levels possible. This could be statically computed based
@ -105,42 +102,53 @@ typedef unsigned long bitmap_t;
* unused trailing entries in bitmap_info_t structures; the bitmaps themselves
* are not impacted.
*/
#define BITMAP_MAX_LEVELS 5
# define BITMAP_MAX_LEVELS 5
#define BITMAP_INFO_INITIALIZER(nbits) { \
/* nbits. */ \
nbits, \
/* nlevels. */ \
(BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
(BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
(BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
(BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
/* levels. */ \
{ \
{0}, \
{BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
+ BITMAP_GROUPS_L0(nbits)} \
} \
}
# define BITMAP_INFO_INITIALIZER(nbits) \
{ \
/* nbits. */ \
nbits, /* nlevels. */ \
(BITMAP_GROUPS_L0(nbits) \
> BITMAP_GROUPS_L1(nbits)) \
+ (BITMAP_GROUPS_L1(nbits) \
> BITMAP_GROUPS_L2(nbits)) \
+ (BITMAP_GROUPS_L2(nbits) \
> BITMAP_GROUPS_L3(nbits)) \
+ (BITMAP_GROUPS_L3(nbits) \
> BITMAP_GROUPS_L4(nbits)) \
+ 1, /* levels. */ \
{ \
{0}, {BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L1(nbits) \
+ BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L2(nbits) \
+ BITMAP_GROUPS_L1(nbits) \
+ BITMAP_GROUPS_L0(nbits)}, \
{BITMAP_GROUPS_L3(nbits) \
+ BITMAP_GROUPS_L2(nbits) \
+ BITMAP_GROUPS_L1(nbits) \
+ BITMAP_GROUPS_L0(nbits)}, \
{ \
BITMAP_GROUPS_L4(nbits) \
+ BITMAP_GROUPS_L3(nbits) \
+ BITMAP_GROUPS_L2(nbits) \
+ BITMAP_GROUPS_L1(nbits) \
+ BITMAP_GROUPS_L0(nbits) \
} \
} \
}
#else /* BITMAP_USE_TREE */
#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
# define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
# define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
#define BITMAP_INFO_INITIALIZER(nbits) { \
/* nbits. */ \
nbits, \
/* ngroups. */ \
BITMAP_BITS2GROUPS(nbits) \
}
# define BITMAP_INFO_INITIALIZER(nbits) \
{ \
/* nbits. */ \
nbits, /* ngroups. */ \
BITMAP_BITS2GROUPS(nbits) \
}
#endif /* BITMAP_USE_TREE */
@ -161,21 +169,21 @@ typedef struct bitmap_info_s {
* Only the first (nlevels+1) elements are used, and levels are ordered
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
#else /* BITMAP_USE_TREE */
bitmap_level_t levels[BITMAP_MAX_LEVELS + 1];
#else /* BITMAP_USE_TREE */
/* Number of groups necessary for nbits. */
size_t ngroups;
#endif /* BITMAP_USE_TREE */
} bitmap_info_t;
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
size_t bitmap_size(const bitmap_info_t *binfo);
static inline bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
/* The bitmap is full iff the root group is 0. */
return (rg == 0);
@ -193,7 +201,7 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
static inline bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
size_t goff;
bitmap_t g;
assert(bit < binfo->nbits);
@ -204,9 +212,9 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
static inline void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
size_t goff;
bitmap_t *gp;
bitmap_t g;
bitmap_t g;
assert(bit < binfo->nbits);
assert(!bitmap_get(bitmap, binfo, bit));
@ -245,12 +253,13 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
#ifdef BITMAP_USE_TREE
size_t bit = 0;
for (unsigned level = binfo->nlevels; level--;) {
size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
1));
bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
>> lg_bits_per_group)];
unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS
* (level + 1));
bitmap_t group = bitmap[binfo->levels[level].group_offset
+ (bit >> lg_bits_per_group)];
unsigned group_nmask =
(unsigned)(((min_bit > bit) ? (min_bit - bit) : 0)
>> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
assert(group_nmask <= BITMAP_GROUP_NBITS);
bitmap_t group_mask = ~((1LU << group_nmask) - 1);
bitmap_t group_masked = group & group_mask;
@ -273,16 +282,16 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
}
return bitmap_ffu(bitmap, binfo, sib_base);
}
bit += ((size_t)ffs_lu(group_masked)) <<
(lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
bit += ((size_t)ffs_lu(group_masked))
<< (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
}
assert(bit >= min_bit);
assert(bit < binfo->nbits);
return bit;
#else
size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
- 1);
size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
bitmap_t g = bitmap[i]
& ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK)) - 1);
size_t bit;
while (1) {
if (g != 0) {
@ -302,7 +311,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
/* sfu: set first unset. */
static inline size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
size_t bit;
size_t bit;
bitmap_t g;
unsigned i;
@ -332,9 +341,9 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
static inline void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t *gp;
bitmap_t g;
size_t goff;
bitmap_t *gp;
bitmap_t g;
UNUSED bool propagate;
assert(bit < binfo->nbits);

View file

@ -16,21 +16,21 @@
typedef struct {
write_cb_t *write_cb;
void *cbopaque;
char *buf;
size_t buf_size;
size_t buf_end;
bool internal_buf;
void *cbopaque;
char *buf;
size_t buf_size;
size_t buf_end;
bool internal_buf;
} buf_writer_t;
bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer,
write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len);
void buf_writer_flush(buf_writer_t *buf_writer);
bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer,
write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len);
void buf_writer_flush(buf_writer_t *buf_writer);
write_cb_t buf_writer_cb;
void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer);
void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer);
typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit);
void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
void *read_cbopaque);
typedef ssize_t(read_cb_t)(void *read_cbopaque, void *buf, size_t limit);
void buf_writer_pipe(
buf_writer_t *buf_writer, read_cb_t *read_cb, void *read_cbopaque);
#endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */

View file

@ -45,8 +45,8 @@ extern const uintptr_t disabled_bin;
* 1 << (sizeof(cache_bin_sz_t) * 8)
* bytes spread across pointer sized objects to get the maximum.
*/
#define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \
/ sizeof(void *) - 1)
#define CACHE_BIN_NCACHED_MAX \
(((size_t)1 << sizeof(cache_bin_sz_t) * 8) / sizeof(void *) - 1)
/*
* This lives inside the cache_bin (for locality reasons), and is initialized
@ -152,8 +152,8 @@ struct cache_bin_array_descriptor_s {
};
static inline void
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
cache_bin_t *bins) {
cache_bin_array_descriptor_init(
cache_bin_array_descriptor_t *descriptor, cache_bin_t *bins) {
ql_elm_new(descriptor, link);
descriptor->bins = bins;
}
@ -222,7 +222,8 @@ cache_bin_ncached_max_get(cache_bin_t *bin) {
* with later.
*/
static inline void
cache_bin_assert_earlier(cache_bin_t *bin, cache_bin_sz_t earlier, cache_bin_sz_t later) {
cache_bin_assert_earlier(
cache_bin_t *bin, cache_bin_sz_t earlier, cache_bin_sz_t later) {
if (earlier > later) {
assert(bin->low_bits_full > bin->low_bits_empty);
}
@ -285,8 +286,8 @@ static inline void **
cache_bin_empty_position_get(cache_bin_t *bin) {
cache_bin_sz_t diff = cache_bin_diff(bin,
(cache_bin_sz_t)(uintptr_t)bin->stack_head, bin->low_bits_empty);
byte_t *empty_bits = (byte_t *)bin->stack_head + diff;
void **ret = (void **)empty_bits;
byte_t *empty_bits = (byte_t *)bin->stack_head + diff;
void **ret = (void **)empty_bits;
assert(ret >= bin->stack_head);
@ -305,8 +306,8 @@ cache_bin_empty_position_get(cache_bin_t *bin) {
*/
static inline cache_bin_sz_t
cache_bin_low_bits_low_bound_get(cache_bin_t *bin) {
return (cache_bin_sz_t)bin->low_bits_empty -
cache_bin_ncached_max_get(bin) * sizeof(void *);
return (cache_bin_sz_t)bin->low_bits_empty
- cache_bin_ncached_max_get(bin) * sizeof(void *);
}
/*
@ -317,7 +318,7 @@ cache_bin_low_bits_low_bound_get(cache_bin_t *bin) {
static inline void **
cache_bin_low_bound_get(cache_bin_t *bin) {
cache_bin_sz_t ncached_max = cache_bin_ncached_max_get(bin);
void **ret = cache_bin_empty_position_get(bin) - ncached_max;
void **ret = cache_bin_empty_position_get(bin) - ncached_max;
assert(ret <= bin->stack_head);
return ret;
@ -340,8 +341,8 @@ cache_bin_assert_empty(cache_bin_t *bin) {
*/
static inline cache_bin_sz_t
cache_bin_low_water_get_internal(cache_bin_t *bin) {
return cache_bin_diff(bin, bin->low_bits_low_water,
bin->low_bits_empty) / sizeof(void *);
return cache_bin_diff(bin, bin->low_bits_low_water, bin->low_bits_empty)
/ sizeof(void *);
}
/* Returns the numeric value of low water in [0, ncached]. */
@ -351,7 +352,8 @@ cache_bin_low_water_get(cache_bin_t *bin) {
assert(low_water <= cache_bin_ncached_max_get(bin));
assert(low_water <= cache_bin_ncached_get_local(bin));
cache_bin_assert_earlier(bin, (cache_bin_sz_t)(uintptr_t)bin->stack_head,
cache_bin_assert_earlier(bin,
(cache_bin_sz_t)(uintptr_t)bin->stack_head,
bin->low_bits_low_water);
return low_water;
@ -390,9 +392,9 @@ cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) {
* This may read from the empty position; however the loaded value won't
* be used. It's safe because the stack has one more slot reserved.
*/
void *ret = *bin->stack_head;
void *ret = *bin->stack_head;
cache_bin_sz_t low_bits = (cache_bin_sz_t)(uintptr_t)bin->stack_head;
void **new_head = bin->stack_head + 1;
void **new_head = bin->stack_head + 1;
/*
* Note that the low water mark is at most empty; if we pass this check,
@ -455,7 +457,8 @@ cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) {
JEMALLOC_ALWAYS_INLINE bool
cache_bin_full(cache_bin_t *bin) {
return ((cache_bin_sz_t)(uintptr_t)bin->stack_head == bin->low_bits_full);
return (
(cache_bin_sz_t)(uintptr_t)bin->stack_head == bin->low_bits_full);
}
/*
@ -469,9 +472,9 @@ cache_bin_dalloc_safety_checks(cache_bin_t *bin, void *ptr) {
}
cache_bin_sz_t ncached = cache_bin_ncached_get_internal(bin);
unsigned max_scan = opt_debug_double_free_max_scan < ncached
? opt_debug_double_free_max_scan
: ncached;
unsigned max_scan = opt_debug_double_free_max_scan < ncached
? opt_debug_double_free_max_scan
: ncached;
void **cur = bin->stack_head;
void **limit = cur + max_scan;
@ -516,9 +519,11 @@ cache_bin_stash(cache_bin_t *bin, void *ptr) {
}
/* Stash at the full position, in the [full, head) range. */
cache_bin_sz_t low_bits_head = (cache_bin_sz_t)(uintptr_t)bin->stack_head;
cache_bin_sz_t low_bits_head = (cache_bin_sz_t)(uintptr_t)
bin->stack_head;
/* Wraparound handled as well. */
cache_bin_sz_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head);
cache_bin_sz_t diff = cache_bin_diff(
bin, bin->low_bits_full, low_bits_head);
*(void **)((byte_t *)bin->stack_head - diff) = ptr;
assert(!cache_bin_full(bin));
@ -532,18 +537,21 @@ cache_bin_stash(cache_bin_t *bin, void *ptr) {
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get_internal(cache_bin_t *bin) {
cache_bin_sz_t ncached_max = cache_bin_ncached_max_get(bin);
cache_bin_sz_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin);
cache_bin_sz_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(
bin);
cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound,
bin->low_bits_full) / sizeof(void *);
cache_bin_sz_t n = cache_bin_diff(
bin, low_bits_low_bound, bin->low_bits_full)
/ sizeof(void *);
assert(n <= ncached_max);
if (config_debug && n != 0) {
/* Below are for assertions only. */
void **low_bound = cache_bin_low_bound_get(bin);
assert((cache_bin_sz_t)(uintptr_t)low_bound == low_bits_low_bound);
assert(
(cache_bin_sz_t)(uintptr_t)low_bound == low_bits_low_bound);
void *stashed = *(low_bound + n - 1);
bool aligned = cache_bin_nonfast_aligned(stashed);
bool aligned = cache_bin_nonfast_aligned(stashed);
#ifdef JEMALLOC_JET
/* Allow arbitrary pointers to be stashed in tests. */
aligned = true;
@ -582,16 +590,17 @@ cache_bin_nstashed_get_local(cache_bin_t *bin) {
* they help access values that will not be concurrently modified.
*/
static inline void
cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_sz_t *ncached,
cache_bin_sz_t *nstashed) {
cache_bin_nitems_get_remote(
cache_bin_t *bin, cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
/* Racy version of cache_bin_ncached_get_internal. */
cache_bin_sz_t diff = bin->low_bits_empty -
(cache_bin_sz_t)(uintptr_t)bin->stack_head;
cache_bin_sz_t diff = bin->low_bits_empty
- (cache_bin_sz_t)(uintptr_t)bin->stack_head;
cache_bin_sz_t n = diff / sizeof(void *);
*ncached = n;
/* Racy version of cache_bin_nstashed_get_internal. */
cache_bin_sz_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin);
cache_bin_sz_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(
bin);
n = (bin->low_bits_full - low_bits_low_bound) / sizeof(void *);
*nstashed = n;
/*
@ -616,7 +625,8 @@ struct cache_bin_fill_ctl_s {
* This is to avoid stack overflow when we do batch edata look up, which
* reserves a nflush * sizeof(emap_batch_lookup_result_t) stack variable.
*/
#define CACHE_BIN_NFLUSH_BATCH_MAX ((VARIABLE_ARRAY_SIZE_MAX >> LG_SIZEOF_PTR) - 1)
#define CACHE_BIN_NFLUSH_BATCH_MAX \
((VARIABLE_ARRAY_SIZE_MAX >> LG_SIZEOF_PTR) - 1)
/*
* Filling and flushing are done in batch, on arrays of void *s. For filling,
@ -638,7 +648,7 @@ struct cache_bin_fill_ctl_s {
typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t;
struct cache_bin_ptr_array_s {
cache_bin_sz_t n;
void **ptr;
void **ptr;
};
/*
@ -650,17 +660,17 @@ struct cache_bin_ptr_array_s {
* representations is easy (since they'll require an alloca in the calling
* frame).
*/
#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \
cache_bin_ptr_array_t name; \
name.n = (nval)
#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \
cache_bin_ptr_array_t name; \
name.n = (nval)
/*
* Start a fill. The bin must be empty, and This must be followed by a
* finish_fill call before doing any alloc/dalloc operations on the bin.
*/
static inline void
cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nfill) {
cache_bin_init_ptr_array_for_fill(
cache_bin_t *bin, cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
cache_bin_assert_empty(bin);
arr->ptr = cache_bin_empty_position_get(bin) - nfill;
}
@ -671,8 +681,8 @@ cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
* case of OOM.
*/
static inline void
cache_bin_finish_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nfilled) {
cache_bin_finish_fill(
cache_bin_t *bin, cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) {
cache_bin_assert_empty(bin);
void **empty_position = cache_bin_empty_position_get(bin);
if (nfilled < arr->n) {
@ -687,19 +697,18 @@ cache_bin_finish_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
* everything we give them.
*/
static inline void
cache_bin_init_ptr_array_for_flush(cache_bin_t *bin,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
cache_bin_init_ptr_array_for_flush(
cache_bin_t *bin, cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
arr->ptr = cache_bin_empty_position_get(bin) - nflush;
assert(cache_bin_ncached_get_local(bin) == 0
|| *arr->ptr != NULL);
assert(cache_bin_ncached_get_local(bin) == 0 || *arr->ptr != NULL);
}
static inline void
cache_bin_finish_flush(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nflushed) {
cache_bin_finish_flush(
cache_bin_t *bin, cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
unsigned rem = cache_bin_ncached_get_local(bin) - nflushed;
memmove(bin->stack_head + nflushed, bin->stack_head,
rem * sizeof(void *));
memmove(
bin->stack_head + nflushed, bin->stack_head, rem * sizeof(void *));
bin->stack_head += nflushed;
cache_bin_low_water_adjust(bin);
}
@ -728,14 +737,14 @@ cache_bin_finish_flush_stashed(cache_bin_t *bin) {
* Initialize a cache_bin_info to represent up to the given number of items in
* the cache_bins it is associated with.
*/
void cache_bin_info_init(cache_bin_info_t *bin_info,
cache_bin_sz_t ncached_max);
void cache_bin_info_init(
cache_bin_info_t *bin_info, cache_bin_sz_t ncached_max);
/*
* Given an array of initialized cache_bin_info_ts, determine how big an
* allocation is required to initialize a full set of cache_bin_ts.
*/
void cache_bin_info_compute_alloc(const cache_bin_info_t *infos,
szind_t ninfos, size_t *size, size_t *alignment);
void cache_bin_info_compute_alloc(const cache_bin_info_t *infos, szind_t ninfos,
size_t *size, size_t *alignment);
/*
* Actually initialize some cache bins. Callers should allocate the backing
@ -747,8 +756,8 @@ void cache_bin_info_compute_alloc(const cache_bin_info_t *infos,
void cache_bin_preincrement(const cache_bin_info_t *infos, szind_t ninfos,
void *alloc, size_t *cur_offset);
void cache_bin_postincrement(void *alloc, size_t *cur_offset);
void cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info,
void *alloc, size_t *cur_offset);
void cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc,
size_t *cur_offset);
void cache_bin_init_disabled(cache_bin_t *bin, cache_bin_sz_t ncached_max);
bool cache_bin_stack_use_thp(void);

View file

@ -22,8 +22,8 @@
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);
typedef void ckh_hash_t(const void *, size_t[2]);
typedef bool ckh_keycomp_t(const void *, const void *);
/* Hash table cell. */
typedef struct {
@ -56,7 +56,7 @@ typedef struct {
unsigned lg_curbuckets;
/* Hash and comparison functions. */
ckh_hash_t *hash;
ckh_hash_t *hash;
ckh_keycomp_t *keycomp;
/* Hash table with 2^lg_curbuckets buckets. */
@ -89,8 +89,8 @@ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
* the key and value, and doesn't do any lifetime management.
*/
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_remove(
tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
/* Some useful hash and comparison functions for strings and pointers. */

View file

@ -8,7 +8,7 @@
typedef struct counter_accum_s {
LOCKEDINT_MTX_DECLARE(mtx)
locked_u64_t accumbytes;
uint64_t interval;
uint64_t interval;
} counter_accum_t;
JEMALLOC_ALWAYS_INLINE bool

View file

@ -13,7 +13,7 @@
#include "jemalloc/internal/stats.h"
/* Maximum ctl tree depth. */
#define CTL_MAX_DEPTH 7
#define CTL_MAX_DEPTH 7
#define CTL_MULTI_SETTING_MAX_LEN 1000
typedef struct ctl_node_s {
@ -21,37 +21,37 @@ typedef struct ctl_node_s {
} ctl_node_t;
typedef struct ctl_named_node_s {
ctl_node_t node;
ctl_node_t node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
size_t nchildren;
size_t nchildren;
const ctl_node_t *children;
int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
size_t);
int (*ctl)(
tsd_t *, const size_t *, size_t, void *, size_t *, void *, size_t);
} ctl_named_node_t;
typedef struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
size_t);
const ctl_named_node_t *(*index)(
tsdn_t *, const size_t *, size_t, size_t);
} ctl_indexed_node_t;
typedef struct ctl_arena_stats_s {
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
size_t allocated_small;
size_t allocated_small;
uint64_t nmalloc_small;
uint64_t ndalloc_small;
uint64_t nrequests_small;
uint64_t nfills_small;
uint64_t nflushes_small;
bin_stats_data_t bstats[SC_NBINS];
bin_stats_data_t bstats[SC_NBINS];
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
pac_estats_t estats[SC_NPSIZES];
hpa_shard_stats_t hpastats;
sec_stats_t secstats;
pac_estats_t estats[SC_NPSIZES];
hpa_shard_stats_t hpastats;
sec_stats_t secstats;
} ctl_arena_stats_t;
typedef struct ctl_stats_s {
@ -72,17 +72,17 @@ typedef struct ctl_stats_s {
typedef struct ctl_arena_s ctl_arena_t;
struct ctl_arena_s {
unsigned arena_ind;
bool initialized;
bool initialized;
ql_elm(ctl_arena_t) destroyed_link;
/* Basic stats, supported even if !config_stats. */
unsigned nthreads;
unsigned nthreads;
const char *dss;
ssize_t dirty_decay_ms;
ssize_t muzzy_decay_ms;
size_t pactive;
size_t pdirty;
size_t pmuzzy;
ssize_t dirty_decay_ms;
ssize_t muzzy_decay_ms;
size_t pactive;
size_t pdirty;
size_t pmuzzy;
/* NULL if !config_stats. */
ctl_arena_stats_t *astats;
@ -107,60 +107,67 @@ int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp);
int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
int ctl_mibnametomib(
tsd_t *tsd, size_t *mib, size_t miblen, const char *name, size_t *miblenp);
int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
void ctl_mtx_assert_held(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
#define xmallctl(name, oldp, oldlenp, newp, newlen) \
do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) != 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) \
do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf( \
"<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) \
do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
#define xmallctlmibnametomib(mib, miblen, name, miblenp) do { \
if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \
!= 0) { \
malloc_write( \
"<jemalloc>: Failure in ctl_mibnametomib()\n"); \
abort(); \
} \
} while (0)
#define xmallctlmibnametomib(mib, miblen, name, miblenp) \
do { \
if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \
!= 0) { \
malloc_write( \
"<jemalloc>: Failure in ctl_mibnametomib()\n"); \
abort(); \
} \
} while (0)
#define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp, \
newp, newlen) do { \
if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \
oldp, oldlenp, newp, newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in ctl_bymibname()\n"); \
abort(); \
} \
} while (0)
#define xmallctlbymibname( \
mib, miblen, name, miblenp, oldp, oldlenp, newp, newlen) \
do { \
if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \
oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_write( \
"<jemalloc>: Failure in ctl_bymibname()\n"); \
abort(); \
} \
} while (0)
#endif /* JEMALLOC_INTERNAL_CTL_H */

View file

@ -5,7 +5,7 @@
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/smoothstep.h"
#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1)
#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t) - 1)
/*
* The decay_t computes the number of pages we should purge at any given time.
@ -168,12 +168,12 @@ void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
/*
* Compute how many of 'npages_new' pages we would need to purge in 'time'.
*/
uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time,
size_t npages_new);
uint64_t decay_npages_purge_in(
decay_t *decay, nstime_t *time, size_t npages_new);
/* Returns true if the epoch advanced and there are pages to purge. */
bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
size_t current_npages);
bool decay_maybe_advance_epoch(
decay_t *decay, nstime_t *new_time, size_t current_npages);
/*
* Calculates wait time until a number of pages in the interval
@ -182,7 +182,7 @@ bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
* Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of
* indefinite wait.
*/
uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
uint64_t npages_threshold);
uint64_t decay_ns_until_purge(
decay_t *decay, size_t npages_current, uint64_t npages_threshold);
#endif /* JEMALLOC_INTERNAL_DECAY_H */

View file

@ -9,8 +9,8 @@
typedef struct ecache_s ecache_t;
struct ecache_s {
malloc_mutex_t mtx;
eset_t eset;
eset_t guarded_eset;
eset_t eset;
eset_t guarded_eset;
/* All stored extents must be in the same state. */
extent_state_t state;
/* The index of the ehooks the ecache is associated with. */
@ -24,22 +24,22 @@ struct ecache_s {
static inline size_t
ecache_npages_get(ecache_t *ecache) {
return eset_npages_get(&ecache->eset) +
eset_npages_get(&ecache->guarded_eset);
return eset_npages_get(&ecache->eset)
+ eset_npages_get(&ecache->guarded_eset);
}
/* Get the number of extents in the given page size index. */
static inline size_t
ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
return eset_nextents_get(&ecache->eset, ind) +
eset_nextents_get(&ecache->guarded_eset, ind);
return eset_nextents_get(&ecache->eset, ind)
+ eset_nextents_get(&ecache->guarded_eset, ind);
}
/* Get the sum total bytes of the extents in the given page size index. */
static inline size_t
ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
return eset_nbytes_get(&ecache->eset, ind) +
eset_nbytes_get(&ecache->guarded_eset, ind);
return eset_nbytes_get(&ecache->eset, ind)
+ eset_nbytes_get(&ecache->guarded_eset, ind);
}
static inline unsigned

View file

@ -30,9 +30,9 @@
#define ESET_ENUMERATE_MAX_NUM 32
enum extent_state_e {
extent_state_active = 0,
extent_state_dirty = 1,
extent_state_muzzy = 2,
extent_state_active = 0,
extent_state_dirty = 1,
extent_state_muzzy = 2,
extent_state_retained = 3,
extent_state_transition = 4, /* States below are intermediate. */
extent_state_merging = 5,
@ -42,7 +42,7 @@ typedef enum extent_state_e extent_state_t;
enum extent_head_state_e {
EXTENT_NOT_HEAD,
EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
};
typedef enum extent_head_state_e extent_head_state_t;
@ -50,25 +50,22 @@ typedef enum extent_head_state_e extent_head_state_t;
* Which implementation of the page allocator interface, (PAI, defined in
* pai.h) owns the given extent?
*/
enum extent_pai_e {
EXTENT_PAI_PAC = 0,
EXTENT_PAI_HPA = 1
};
enum extent_pai_e { EXTENT_PAI_PAC = 0, EXTENT_PAI_HPA = 1 };
typedef enum extent_pai_e extent_pai_t;
struct e_prof_info_s {
/* Time when this was allocated. */
nstime_t e_prof_alloc_time;
nstime_t e_prof_alloc_time;
/* Allocation request size. */
size_t e_prof_alloc_size;
size_t e_prof_alloc_size;
/* Points to a prof_tctx_t. */
atomic_p_t e_prof_tctx;
atomic_p_t e_prof_tctx;
/*
* Points to a prof_recent_t for the allocation; NULL
* means the recent allocation record no longer exists.
* Protected by prof_recent_alloc_mtx.
*/
atomic_p_t e_prof_recent_alloc;
atomic_p_t e_prof_recent_alloc;
};
typedef struct e_prof_info_s e_prof_info_t;
@ -85,13 +82,13 @@ typedef struct e_prof_info_s e_prof_info_t;
*/
typedef struct edata_map_info_s edata_map_info_t;
struct edata_map_info_s {
bool slab;
bool slab;
szind_t szind;
};
typedef struct edata_cmp_summary_s edata_cmp_summary_t;
struct edata_cmp_summary_s {
uint64_t sn;
uint64_t sn;
uintptr_t addr;
};
@ -149,55 +146,72 @@ struct edata_s {
*
* bin_shard: the shard of the bin from which this extent came.
*/
uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) \
((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) \
<< (CURRENT_FIELD_SHIFT))
#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT 0
#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT 0
#define EDATA_BITS_ARENA_MASK \
MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_WIDTH 1
#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_SLAB_WIDTH 1
#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_WIDTH 1
#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_COMMITTED_WIDTH 1
#define EDATA_BITS_COMMITTED_SHIFT \
(EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK \
MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_WIDTH 1
#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_PAI_WIDTH 1
#define EDATA_BITS_PAI_SHIFT \
(EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_WIDTH 1
#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_ZEROED_WIDTH 1
#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_MASK \
MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_WIDTH 1
#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_GUARDED_WIDTH 1
#define EDATA_BITS_GUARDED_SHIFT \
(EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_MASK \
MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_WIDTH 3
#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_STATE_WIDTH 3
#define EDATA_BITS_STATE_SHIFT \
(EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_MASK \
MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK \
MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK \
MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_WIDTH 6
#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_BINSHARD_WIDTH 6
#define EDATA_BITS_BINSHARD_SHIFT \
(EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK \
MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_WIDTH 1
#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
#define EDATA_BITS_IS_HEAD_SHIFT \
(EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK \
MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
void *e_addr;
union {
/*
@ -207,11 +221,11 @@ struct edata_s {
*
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
*/
size_t e_size_esn;
#define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
#define EDATA_ESN_MASK ((size_t)PAGE-1)
size_t e_size_esn;
#define EDATA_SIZE_MASK ((size_t) ~(PAGE - 1))
#define EDATA_ESN_MASK ((size_t)PAGE - 1)
/* Base extent size, which may not be a multiple of PAGE. */
size_t e_bsize;
size_t e_bsize;
};
/*
@ -232,7 +246,7 @@ struct edata_s {
* List linkage used when the edata_t is active; either in
* arena's large allocations or bin_t's slabs_full.
*/
ql_elm(edata_t) ql_link_active;
ql_elm(edata_t) ql_link_active;
/*
* Pairing heap linkage. Used whenever the extent is inactive
* (in the page allocators), or when it is active and in
@ -240,7 +254,7 @@ struct edata_s {
* extent and sitting in an edata_cache.
*/
union {
edata_heap_link_t heap_link;
edata_heap_link_t heap_link;
edata_avail_link_t avail_link;
};
};
@ -253,10 +267,10 @@ struct edata_s {
*/
ql_elm(edata_t) ql_link_inactive;
/* Small region slab metadata. */
slab_data_t e_slab_data;
slab_data_t e_slab_data;
/* Profiling data, used for large objects. */
e_prof_info_t e_prof_info;
e_prof_info_t e_prof_info;
};
};
@ -265,8 +279,8 @@ TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
static inline unsigned
edata_arena_ind_get(const edata_t *edata) {
unsigned arena_ind = (unsigned)((edata->e_bits &
EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
unsigned arena_ind = (unsigned)((edata->e_bits & EDATA_BITS_ARENA_MASK)
>> EDATA_BITS_ARENA_SHIFT);
assert(arena_ind < MALLOCX_ARENA_LIMIT);
return arena_ind;
@ -274,8 +288,8 @@ edata_arena_ind_get(const edata_t *edata) {
static inline szind_t
edata_szind_get_maybe_invalid(const edata_t *edata) {
szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
EDATA_BITS_SZIND_SHIFT);
szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK)
>> EDATA_BITS_SZIND_SHIFT);
assert(szind <= SC_NSIZES);
return szind;
}
@ -318,8 +332,8 @@ edata_usize_get(const edata_t *edata) {
if (!sz_large_size_classes_disabled() || szind < SC_NBINS) {
size_t usize_from_ind = sz_index2size(szind);
if (!sz_large_size_classes_disabled() &&
usize_from_ind >= SC_LARGE_MINCLASS) {
if (!sz_large_size_classes_disabled()
&& usize_from_ind >= SC_LARGE_MINCLASS) {
size_t size = (edata->e_size_esn & EDATA_SIZE_MASK);
assert(size > sz_large_pad);
size_t usize_from_size = size - sz_large_pad;
@ -341,8 +355,9 @@ edata_usize_get(const edata_t *edata) {
static inline unsigned
edata_binshard_get(const edata_t *edata) {
unsigned binshard = (unsigned)((edata->e_bits &
EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
unsigned binshard = (unsigned)((edata->e_bits
& EDATA_BITS_BINSHARD_MASK)
>> EDATA_BITS_BINSHARD_SHIFT);
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
return binshard;
}
@ -354,58 +369,58 @@ edata_sn_get(const edata_t *edata) {
static inline extent_state_t
edata_state_get(const edata_t *edata) {
return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
EDATA_BITS_STATE_SHIFT);
return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK)
>> EDATA_BITS_STATE_SHIFT);
}
static inline bool
edata_guarded_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
EDATA_BITS_GUARDED_SHIFT);
return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK)
>> EDATA_BITS_GUARDED_SHIFT);
}
static inline bool
edata_zeroed_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
EDATA_BITS_ZEROED_SHIFT);
return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK)
>> EDATA_BITS_ZEROED_SHIFT);
}
static inline bool
edata_committed_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
EDATA_BITS_COMMITTED_SHIFT);
return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK)
>> EDATA_BITS_COMMITTED_SHIFT);
}
static inline extent_pai_t
edata_pai_get(const edata_t *edata) {
return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
EDATA_BITS_PAI_SHIFT);
return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK)
>> EDATA_BITS_PAI_SHIFT);
}
static inline bool
edata_slab_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
EDATA_BITS_SLAB_SHIFT);
return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK)
>> EDATA_BITS_SLAB_SHIFT);
}
static inline unsigned
edata_nfree_get(const edata_t *edata) {
assert(edata_slab_get(edata));
return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
EDATA_BITS_NFREE_SHIFT);
return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK)
>> EDATA_BITS_NFREE_SHIFT);
}
static inline void *
edata_base_get(const edata_t *edata) {
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
!edata_slab_get(edata));
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr)
|| !edata_slab_get(edata));
return PAGE_ADDR2BASE(edata->e_addr);
}
static inline void *
edata_addr_get(const edata_t *edata) {
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
!edata_slab_get(edata));
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr)
|| !edata_slab_get(edata));
return edata->e_addr;
}
@ -437,14 +452,14 @@ edata_before_get(const edata_t *edata) {
static inline void *
edata_last_get(const edata_t *edata) {
return (void *)((byte_t *)edata_base_get(edata) +
edata_size_get(edata) - PAGE);
return (void *)((byte_t *)edata_base_get(edata) + edata_size_get(edata)
- PAGE);
}
static inline void *
edata_past_get(const edata_t *edata) {
return (void *)((byte_t *)edata_base_get(edata) +
edata_size_get(edata));
return (
void *)((byte_t *)edata_base_get(edata) + edata_size_get(edata));
}
static inline slab_data_t *
@ -461,8 +476,8 @@ edata_slab_data_get_const(const edata_t *edata) {
static inline prof_tctx_t *
edata_prof_tctx_get(const edata_t *edata) {
return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
ATOMIC_ACQUIRE);
return (prof_tctx_t *)atomic_load_p(
&edata->e_prof_info.e_prof_tctx, ATOMIC_ACQUIRE);
}
static inline const nstime_t *
@ -483,16 +498,16 @@ edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
static inline void
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK)
| ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}
static inline void
edata_binshard_set(edata_t *edata, unsigned binshard) {
/* The assertion assumes szind is set already. */
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK)
| ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}
static inline void
@ -508,8 +523,8 @@ edata_size_set(edata_t *edata, size_t size) {
static inline void
edata_esn_set(edata_t *edata, size_t esn) {
edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
EDATA_ESN_MASK);
edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK)
| (esn & EDATA_ESN_MASK);
}
static inline void
@ -526,25 +541,26 @@ edata_ps_set(edata_t *edata, hpdata_t *ps) {
static inline void
edata_szind_set(edata_t *edata, szind_t szind) {
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK)
| ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}
static inline void
edata_nfree_set(edata_t *edata, unsigned nfree) {
assert(edata_slab_get(edata));
edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK)
| ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
/* The assertion assumes szind is set already. */
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
edata->e_bits = (edata->e_bits &
(~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
edata->e_bits = (edata->e_bits
& (~EDATA_BITS_NFREE_MASK
& ~EDATA_BITS_BINSHARD_MASK))
| ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT)
| ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
static inline void
@ -572,38 +588,38 @@ edata_sn_set(edata_t *edata, uint64_t sn) {
static inline void
edata_state_set(edata_t *edata, extent_state_t state) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
((uint64_t)state << EDATA_BITS_STATE_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK)
| ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}
static inline void
edata_guarded_set(edata_t *edata, bool guarded) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK)
| ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
}
static inline void
edata_zeroed_set(edata_t *edata, bool zeroed) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK)
| ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}
static inline void
edata_committed_set(edata_t *edata, bool committed) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK)
| ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}
static inline void
edata_pai_set(edata_t *edata, extent_pai_t pai) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK)
| ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
}
static inline void
edata_slab_set(edata_t *edata, bool slab) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK)
| ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}
static inline void
@ -622,22 +638,22 @@ edata_prof_alloc_size_set(edata_t *edata, size_t size) {
}
static inline void
edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
prof_recent_t *recent_alloc) {
edata_prof_recent_alloc_set_dont_call_directly(
edata_t *edata, prof_recent_t *recent_alloc) {
atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
ATOMIC_RELAXED);
}
static inline bool
edata_is_head_get(edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
EDATA_BITS_IS_HEAD_SHIFT);
return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK)
>> EDATA_BITS_IS_HEAD_SHIFT);
}
static inline void
edata_is_head_set(edata_t *edata, bool is_head) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK)
| ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}
static inline bool
@ -676,8 +692,8 @@ edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
}
static inline void
edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn,
bool reused) {
edata_binit(
edata_t *edata, void *addr, size_t bsize, uint64_t sn, bool reused) {
edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
edata_addr_set(edata, addr);
edata_bsize_set(edata, bsize);
@ -729,11 +745,13 @@ edata_cmp_summary_encode(edata_cmp_summary_t src) {
static inline int
edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
unsigned __int128 a_encoded = edata_cmp_summary_encode(a);
unsigned __int128 b_encoded = edata_cmp_summary_encode(b);
if (a_encoded < b_encoded) return -1;
if (a_encoded == b_encoded) return 0;
return 1;
unsigned __int128 a_encoded = edata_cmp_summary_encode(a);
unsigned __int128 b_encoded = edata_cmp_summary_encode(b);
if (a_encoded < b_encoded)
return -1;
if (a_encoded == b_encoded)
return 0;
return 1;
}
#else
static inline int
@ -750,8 +768,8 @@ edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
* prediction accuracy is not great. As a result, this implementation
* is measurably faster (by around 30%).
*/
return (2 * ((a.sn > b.sn) - (a.sn < b.sn))) +
((a.addr > b.addr) - (a.addr < b.addr));
return (2 * ((a.sn > b.sn) - (a.sn < b.sn)))
+ ((a.addr > b.addr) - (a.addr < b.addr));
}
#endif
@ -772,7 +790,6 @@ edata_esnead_comp(const edata_t *a, const edata_t *b) {
return (2 * edata_esn_comp(a, b)) + edata_ead_comp(a, b);
}
ph_proto(, edata_avail, edata_t)
ph_proto(, edata_heap, edata_t)
ph_proto(, edata_avail, edata_t) ph_proto(, edata_heap, edata_t)
#endif /* JEMALLOC_INTERNAL_EDATA_H */

View file

@ -15,13 +15,13 @@
typedef struct edata_cache_s edata_cache_t;
struct edata_cache_s {
edata_avail_t avail;
atomic_zu_t count;
edata_avail_t avail;
atomic_zu_t count;
malloc_mutex_t mtx;
base_t *base;
base_t *base;
};
bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache);
void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata);
@ -37,14 +37,14 @@ void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);
typedef struct edata_cache_fast_s edata_cache_fast_t;
struct edata_cache_fast_s {
edata_list_inactive_t list;
edata_cache_t *fallback;
bool disabled;
edata_cache_t *fallback;
bool disabled;
};
void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback);
edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs);
void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs,
edata_t *edata);
void edata_cache_fast_put(
tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata);
void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs);
#endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */

View file

@ -46,10 +46,10 @@ extern const extent_hooks_t ehooks_default_extent_hooks;
*/
void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
bool ehooks_default_dalloc_impl(void *addr, size_t size);
void ehooks_default_destroy_impl(void *addr, size_t size);
bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length);
bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length);
bool ehooks_default_dalloc_impl(void *addr, size_t size);
void ehooks_default_destroy_impl(void *addr, size_t size);
bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length);
bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length);
#ifdef PAGES_CAN_PURGE_LAZY
bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length);
#endif
@ -116,8 +116,8 @@ ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) {
static inline bool
ehooks_are_default(ehooks_t *ehooks) {
return ehooks_get_extent_hooks_ptr(ehooks) ==
&ehooks_default_extent_hooks;
return ehooks_get_extent_hooks_ptr(ehooks)
== &ehooks_default_extent_hooks;
}
/*
@ -189,16 +189,15 @@ ehooks_debug_zero_check(void *addr, size_t size) {
}
}
static inline void *
ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
bool orig_zero = *zero;
void *ret;
bool orig_zero = *zero;
void *ret;
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ret = ehooks_default_alloc_impl(tsdn, new_addr, size,
alignment, zero, commit, ehooks_ind_get(ehooks));
ret = ehooks_default_alloc_impl(tsdn, new_addr, size, alignment,
zero, commit, ehooks_ind_get(ehooks));
} else {
ehooks_pre_reentrancy(tsdn);
ret = extent_hooks->alloc(extent_hooks, new_addr, size,
@ -214,8 +213,8 @@ ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
}
static inline bool
ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
bool committed) {
ehooks_dalloc(
tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, bool committed) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_dalloc_impl(addr, size);
@ -231,8 +230,8 @@ ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
}
static inline void
ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
bool committed) {
ehooks_destroy(
tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, bool committed) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ehooks_default_destroy_impl(addr, size);
@ -250,15 +249,15 @@ static inline bool
ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t offset, size_t length) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
bool err;
bool err;
if (extent_hooks == &ehooks_default_extent_hooks) {
err = ehooks_default_commit_impl(addr, offset, length);
} else if (extent_hooks->commit == NULL) {
err = true;
} else {
ehooks_pre_reentrancy(tsdn);
err = extent_hooks->commit(extent_hooks, addr, size,
offset, length, ehooks_ind_get(ehooks));
err = extent_hooks->commit(extent_hooks, addr, size, offset,
length, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
}
if (!err) {
@ -384,7 +383,7 @@ ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
static inline bool
ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
bool err;
bool err;
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
@ -399,7 +398,7 @@ ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
static inline bool
ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
bool err;
bool err;
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {

View file

@ -10,9 +10,9 @@
* EMAP_DECLARE_RTREE_CTX;
* in uses will avoid empty-statement warnings.
*/
#define EMAP_DECLARE_RTREE_CTX \
rtree_ctx_t rtree_ctx_fallback; \
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
#define EMAP_DECLARE_RTREE_CTX \
rtree_ctx_t rtree_ctx_fallback; \
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
typedef struct emap_s emap_t;
struct emap_s {
@ -22,25 +22,25 @@ struct emap_s {
/* Used to pass rtree lookup context down the path. */
typedef struct emap_alloc_ctx_s emap_alloc_ctx_t;
struct emap_alloc_ctx_s {
size_t usize;
size_t usize;
szind_t szind;
bool slab;
bool slab;
};
typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t;
struct emap_full_alloc_ctx_s {
szind_t szind;
bool slab;
szind_t szind;
bool slab;
edata_t *edata;
};
bool emap_init(emap_t *emap, base_t *base, bool zeroed);
void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
bool slab);
void emap_remap(
tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, bool slab);
void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t state);
void emap_update_edata_state(
tsdn_t *tsdn, emap_t *emap, edata_t *edata, extent_state_t state);
/*
* The two acquire functions below allow accessing neighbor edatas, if it's safe
@ -62,16 +62,16 @@ edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap,
bool forward);
edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
edata_t *edata, extent_pai_t pai, extent_state_t expected_state);
void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t new_state);
void emap_release_edata(
tsdn_t *tsdn, emap_t *emap, edata_t *edata, extent_state_t new_state);
/*
* Associate the given edata with its beginning and end address, setting the
* szind and slab info appropriately.
* Returns true on error (i.e. resource exhaustion).
*/
bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind, bool slab);
bool emap_register_boundary(
tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, bool slab);
/*
* Does the same thing, but with the interior of the range, for slab
@ -92,8 +92,8 @@ bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
* touched, so no allocation is necessary to fill the interior once the boundary
* has been touched.
*/
void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind);
void emap_register_interior(
tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind);
void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
@ -161,8 +161,8 @@ emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
emap_assert_mapped(tsdn, emap, edata);
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata));
rtree_contents_t contents = rtree_read(
tsdn, &emap->rtree, rtree_ctx, (uintptr_t)edata_base_get(edata));
return edata_state_in_transition(contents.metadata.state);
}
@ -194,9 +194,9 @@ emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
}
rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm,
/* dependent */ false);
if (contents.edata == NULL ||
contents.metadata.state == extent_state_active ||
edata_state_in_transition(contents.metadata.state)) {
if (contents.edata == NULL
|| contents.metadata.state == extent_state_active
|| edata_state_in_transition(contents.metadata.state)) {
return true;
}
@ -211,8 +211,8 @@ extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
assert(edata_state_get(inner) == extent_state_active);
assert(edata_state_get(outer) == extent_state_merging);
assert(!edata_guarded_get(inner) && !edata_guarded_get(outer));
assert(edata_base_get(inner) == edata_past_get(outer) ||
edata_base_get(outer) == edata_past_get(inner));
assert(edata_base_get(inner) == edata_past_get(outer)
|| edata_base_get(outer) == edata_past_get(inner));
}
JEMALLOC_ALWAYS_INLINE void
@ -232,13 +232,13 @@ emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
}
JEMALLOC_ALWAYS_INLINE void
emap_alloc_ctx_init(emap_alloc_ctx_t *alloc_ctx, szind_t szind, bool slab,
size_t usize) {
emap_alloc_ctx_init(
emap_alloc_ctx_t *alloc_ctx, szind_t szind, bool slab, size_t usize) {
alloc_ctx->szind = szind;
alloc_ctx->slab = slab;
alloc_ctx->usize = usize;
assert(sz_large_size_classes_disabled() ||
usize == sz_index2size(szind));
assert(
sz_large_size_classes_disabled() || usize == sz_index2size(szind));
}
JEMALLOC_ALWAYS_INLINE size_t
@ -248,27 +248,29 @@ emap_alloc_ctx_usize_get(emap_alloc_ctx_t *alloc_ctx) {
assert(alloc_ctx->usize == sz_index2size(alloc_ctx->szind));
return sz_index2size(alloc_ctx->szind);
}
assert(sz_large_size_classes_disabled() ||
alloc_ctx->usize == sz_index2size(alloc_ctx->szind));
assert(sz_large_size_classes_disabled()
|| alloc_ctx->usize == sz_index2size(alloc_ctx->szind));
assert(alloc_ctx->usize <= SC_LARGE_MAXCLASS);
return alloc_ctx->usize;
}
/* Fills in alloc_ctx with the info in the map. */
JEMALLOC_ALWAYS_INLINE void
emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
emap_alloc_ctx_t *alloc_ctx) {
emap_alloc_ctx_lookup(
tsdn_t *tsdn, emap_t *emap, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)ptr);
rtree_contents_t contents = rtree_read(
tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr);
/*
* If the alloc is invalid, do not calculate usize since edata
* could be corrupted.
*/
emap_alloc_ctx_init(alloc_ctx, contents.metadata.szind,
contents.metadata.slab, (contents.metadata.szind == SC_NSIZES
|| contents.edata == NULL)? 0: edata_usize_get(contents.edata));
contents.metadata.slab,
(contents.metadata.szind == SC_NSIZES || contents.edata == NULL)
? 0
: edata_usize_get(contents.edata));
}
/* The pointer must be mapped. */
@ -277,8 +279,8 @@ emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
emap_full_alloc_ctx_t *full_alloc_ctx) {
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)ptr);
rtree_contents_t contents = rtree_read(
tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr);
full_alloc_ctx->edata = contents.edata;
full_alloc_ctx->szind = contents.metadata.szind;
full_alloc_ctx->slab = contents.metadata.slab;
@ -295,8 +297,8 @@ emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents;
bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)ptr, &contents);
bool err = rtree_read_independent(
tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr, &contents);
if (err) {
return true;
}
@ -311,14 +313,14 @@ emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
* fast path, e.g. when the metadata key is not cached.
*/
JEMALLOC_ALWAYS_INLINE bool
emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
emap_alloc_ctx_t *alloc_ctx) {
emap_alloc_ctx_try_lookup_fast(
tsd_t *tsd, emap_t *emap, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
/* Use the unsafe getter since this may gets called during exit. */
rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd);
rtree_metadata_t metadata;
bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree,
rtree_ctx, (uintptr_t)ptr, &metadata);
bool err = rtree_metadata_try_read_fast(
tsd_tsdn(tsd), &emap->rtree, rtree_ctx, (uintptr_t)ptr, &metadata);
if (err) {
return true;
}
@ -345,11 +347,12 @@ typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind);
* This allows size-checking assertions, which we can only do while we're in the
* process of edata lookups.
*/
typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx);
typedef void (*emap_metadata_visitor)(
void *ctx, emap_full_alloc_ctx_t *alloc_ctx);
typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t;
union emap_batch_lookup_result_u {
edata_t *edata;
edata_t *edata;
rtree_leaf_elm_t *rtree_leaf;
};
@ -375,8 +378,8 @@ emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs,
for (size_t i = 0; i < nptrs; i++) {
rtree_leaf_elm_t *elm = result[i].rtree_leaf;
rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd),
&emap->rtree, elm, /* dependent */ true);
rtree_contents_t contents = rtree_leaf_elm_read(
tsd_tsdn(tsd), &emap->rtree, elm, /* dependent */ true);
result[i].edata = contents.edata;
emap_full_alloc_ctx_t alloc_ctx;
/*

View file

@ -44,18 +44,18 @@ typedef struct emitter_col_s emitter_col_t;
struct emitter_col_s {
/* Filled in by the user. */
emitter_justify_t justify;
int width;
emitter_type_t type;
int width;
emitter_type_t type;
union {
bool bool_val;
int int_val;
unsigned unsigned_val;
uint32_t uint32_val;
uint32_t uint32_t_val;
uint64_t uint64_val;
uint64_t uint64_t_val;
size_t size_val;
ssize_t ssize_val;
bool bool_val;
int int_val;
unsigned unsigned_val;
uint32_t uint32_val;
uint32_t uint32_t_val;
uint64_t uint64_val;
uint64_t uint64_t_val;
size_t size_val;
ssize_t ssize_val;
const char *str_val;
};
@ -73,8 +73,8 @@ struct emitter_s {
emitter_output_t output;
/* The output information. */
write_cb_t *write_cb;
void *cbopaque;
int nesting_depth;
void *cbopaque;
int nesting_depth;
/* True if we've already emitted a value at the given depth. */
bool item_at_depth;
/* True if we emitted a key and will emit corresponding value next. */
@ -83,8 +83,8 @@ struct emitter_s {
static inline bool
emitter_outputs_json(emitter_t *emitter) {
return emitter->output == emitter_output_json ||
emitter->output == emitter_output_json_compact;
return emitter->output == emitter_output_json
|| emitter->output == emitter_output_json_compact;
}
/* Internal convenience function. Write to the emitter the given string. */
@ -98,23 +98,23 @@ emitter_printf(emitter_t *emitter, const char *format, ...) {
va_end(ap);
}
static inline const char * JEMALLOC_FORMAT_ARG(3)
emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
emitter_justify_t justify, int width) {
static inline const char *
JEMALLOC_FORMAT_ARG(3) emitter_gen_fmt(char *out_fmt, size_t out_size,
const char *fmt_specifier, emitter_justify_t justify, int width) {
size_t written;
fmt_specifier++;
if (justify == emitter_justify_none) {
written = malloc_snprintf(out_fmt, out_size,
"%%%s", fmt_specifier);
written = malloc_snprintf(
out_fmt, out_size, "%%%s", fmt_specifier);
} else if (justify == emitter_justify_left) {
written = malloc_snprintf(out_fmt, out_size,
"%%-%d%s", width, fmt_specifier);
written = malloc_snprintf(
out_fmt, out_size, "%%-%d%s", width, fmt_specifier);
} else {
written = malloc_snprintf(out_fmt, out_size,
"%%%d%s", width, fmt_specifier);
written = malloc_snprintf(
out_fmt, out_size, "%%%d%s", width, fmt_specifier);
}
/* Only happens in case of bad format string, which *we* choose. */
assert(written < out_size);
assert(written < out_size);
return out_fmt;
}
@ -122,10 +122,10 @@ static inline void
emitter_emit_str(emitter_t *emitter, emitter_justify_t justify, int width,
char *fmt, size_t fmt_size, const char *str) {
#define BUF_SIZE 256
char buf[BUF_SIZE];
char buf[BUF_SIZE];
size_t str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"", str);
emitter_printf(emitter,
emitter_gen_fmt(fmt, fmt_size, "%s", justify, width), buf);
emitter_printf(
emitter, emitter_gen_fmt(fmt, fmt_size, "%s", justify, width), buf);
if (str_written < BUF_SIZE) {
return;
}
@ -168,16 +168,16 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
*/
char fmt[FMT_SIZE];
#define EMIT_SIMPLE(type, format) \
emitter_printf(emitter, \
emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \
#define EMIT_SIMPLE(type, format) \
emitter_printf(emitter, \
emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \
*(const type *)value);
switch (value_type) {
case emitter_type_bool:
emitter_printf(emitter,
emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
*(const bool *)value ? "true" : "false");
*(const bool *)value ? "true" : "false");
break;
case emitter_type_int:
EMIT_SIMPLE(int, "%d")
@ -213,7 +213,6 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
#undef FMT_SIZE
}
/* Internal functions. In json mode, tracks nesting state. */
static inline void
emitter_nest_inc(emitter_t *emitter) {
@ -229,7 +228,7 @@ emitter_nest_dec(emitter_t *emitter) {
static inline void
emitter_indent(emitter_t *emitter) {
int amount = emitter->nesting_depth;
int amount = emitter->nesting_depth;
const char *indent_str;
assert(emitter->output != emitter_output_json_compact);
if (emitter->output == emitter_output_json) {
@ -291,12 +290,12 @@ emitter_json_key(emitter_t *emitter, const char *json_key) {
}
static inline void
emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
const void *value) {
emitter_json_value(
emitter_t *emitter, emitter_type_t value_type, const void *value) {
if (emitter_outputs_json(emitter)) {
emitter_json_key_prefix(emitter);
emitter_print_value(emitter, emitter_justify_none, -1,
value_type, value);
emitter_print_value(
emitter, emitter_justify_none, -1, value_type, value);
emitter->item_at_depth = true;
}
}
@ -367,7 +366,6 @@ emitter_json_object_end(emitter_t *emitter) {
}
}
/******************************************************************************/
/* Table public API. */
@ -389,14 +387,13 @@ emitter_table_dict_end(emitter_t *emitter) {
static inline void
emitter_table_kv_note(emitter_t *emitter, const char *table_key,
emitter_type_t value_type, const void *value,
const char *table_note_key, emitter_type_t table_note_value_type,
const void *table_note_value) {
emitter_type_t value_type, const void *value, const char *table_note_key,
emitter_type_t table_note_value_type, const void *table_note_value) {
if (emitter->output == emitter_output_table) {
emitter_indent(emitter);
emitter_printf(emitter, "%s: ", table_key);
emitter_print_value(emitter, emitter_justify_none, -1,
value_type, value);
emitter_print_value(
emitter, emitter_justify_none, -1, value_type, value);
if (table_note_key != NULL) {
emitter_printf(emitter, " (%s: ", table_note_key);
emitter_print_value(emitter, emitter_justify_none, -1,
@ -415,7 +412,6 @@ emitter_table_kv(emitter_t *emitter, const char *table_key,
emitter_type_bool, NULL);
}
/* Write to the emitter the given string, but only in table mode. */
JEMALLOC_FORMAT_PRINTF(2, 3)
static inline void
@ -423,7 +419,8 @@ emitter_table_printf(emitter_t *emitter, const char *format, ...) {
if (emitter->output == emitter_output_table) {
va_list ap;
va_start(ap, format);
malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
malloc_vcprintf(
emitter->write_cb, emitter->cbopaque, format, ap);
va_end(ap);
}
}
@ -434,7 +431,7 @@ emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
return;
}
emitter_col_t *col;
ql_foreach(col, &row->cols, link) {
ql_foreach (col, &row->cols, link) {
emitter_print_value(emitter, col->justify, col->width,
col->type, (const void *)&col->bool_val);
}
@ -452,7 +449,6 @@ emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
ql_tail_insert(&row->cols, col, link);
}
/******************************************************************************/
/*
* Generalized public API. Emits using either JSON or table, according to
@ -464,9 +460,8 @@ emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
*/
static inline void
emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
emitter_type_t value_type, const void *value,
const char *table_note_key, emitter_type_t table_note_value_type,
const void *table_note_value) {
emitter_type_t value_type, const void *value, const char *table_note_key,
emitter_type_t table_note_value_type, const void *table_note_value) {
if (emitter_outputs_json(emitter)) {
emitter_json_key(emitter, json_key);
emitter_json_value(emitter, value_type, value);
@ -485,8 +480,8 @@ emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
}
static inline void
emitter_dict_begin(emitter_t *emitter, const char *json_key,
const char *table_header) {
emitter_dict_begin(
emitter_t *emitter, const char *json_key, const char *table_header) {
if (emitter_outputs_json(emitter)) {
emitter_json_key(emitter, json_key);
emitter_json_object_begin(emitter);
@ -526,8 +521,9 @@ emitter_end(emitter_t *emitter) {
if (emitter_outputs_json(emitter)) {
assert(emitter->nesting_depth == 1);
emitter_nest_dec(emitter);
emitter_printf(emitter, "%s", emitter->output ==
emitter_output_json_compact ? "}" : "\n}\n");
emitter_printf(emitter, "%s",
emitter->output == emitter_output_json_compact ? "}"
: "\n}\n");
}
}

View file

@ -27,8 +27,7 @@ exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min,
*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
while (*r_alloc_size < alloc_size_min) {
(*r_skip)++;
if (exp_grow->next + *r_skip >=
sz_psz2ind(SC_LARGE_MAXCLASS)) {
if (exp_grow->next + *r_skip >= sz_psz2ind(SC_LARGE_MAXCLASS)) {
/* Outside legal range. */
return true;
}
@ -44,7 +43,6 @@ exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) {
} else {
exp_grow->next = exp_grow->limit;
}
}
void exp_grow_init(exp_grow_t *exp_grow);

View file

@ -26,9 +26,10 @@ extern size_t opt_process_madvise_max_batch;
#ifdef JEMALLOC_HAVE_PROCESS_MADVISE
/* The iovec is on stack. Limit the max batch to avoid stack overflow. */
#define PROCESS_MADVISE_MAX_BATCH_LIMIT (VARIABLE_ARRAY_SIZE_MAX / sizeof(struct iovec))
# define PROCESS_MADVISE_MAX_BATCH_LIMIT \
(VARIABLE_ARRAY_SIZE_MAX / sizeof(struct iovec))
#else
#define PROCESS_MADVISE_MAX_BATCH_LIMIT 0
# define PROCESS_MADVISE_MAX_BATCH_LIMIT 0
#endif
edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
@ -37,44 +38,43 @@ edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
bool zero, bool guarded);
void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata);
void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata);
edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, size_t npages_min);
void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata);
void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata);
void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata);
void extent_dalloc_gap(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata);
edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
bool growing_retained);
void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata);
void extent_dalloc_wrapper_purged(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata);
void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata);
void extent_dalloc_wrapper(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata);
void extent_dalloc_wrapper_purged(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata);
void extent_destroy_wrapper(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata);
bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac,
ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b,
bool holding_core_locks);
bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *a, edata_t *b);
bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
bool commit, bool zero, bool growing_retained);
edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
bool extent_merge_wrapper(
tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a, edata_t *b);
bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
bool commit, bool zero, bool growing_retained);
size_t extent_sn_next(pac_t *pac);
bool extent_boot(void);
bool extent_boot(void);
JEMALLOC_ALWAYS_INLINE bool
extent_neighbor_head_state_mergeable(bool edata_is_head,
bool neighbor_is_head, bool forward) {
extent_neighbor_head_state_mergeable(
bool edata_is_head, bool neighbor_is_head, bool forward) {
/*
* Head states checking: disallow merging if the higher addr extent is a
* head extent. This helps preserve first-fit, and more importantly
@ -102,8 +102,8 @@ extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
}
/* It's not safe to access *neighbor yet; must verify states first. */
bool neighbor_is_head = contents.metadata.is_head;
if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata),
neighbor_is_head, forward)) {
if (!extent_neighbor_head_state_mergeable(
edata_is_head_get(edata), neighbor_is_head, forward)) {
return false;
}
extent_state_t neighbor_state = contents.metadata.state;
@ -112,8 +112,9 @@ extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
return false;
}
/* From this point, it's safe to access *neighbor. */
if (!expanding && (edata_committed_get(edata) !=
edata_committed_get(neighbor))) {
if (!expanding
&& (edata_committed_get(edata)
!= edata_committed_get(neighbor))) {
/*
* Some platforms (e.g. Windows) require an explicit
* commit step (and writing to uncommitted memory is not
@ -133,11 +134,11 @@ extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
return false;
}
if (opt_retain) {
assert(edata_arena_ind_get(edata) ==
edata_arena_ind_get(neighbor));
assert(edata_arena_ind_get(edata)
== edata_arena_ind_get(neighbor));
} else {
if (edata_arena_ind_get(edata) !=
edata_arena_ind_get(neighbor)) {
if (edata_arena_ind_get(edata)
!= edata_arena_ind_get(neighbor)) {
return false;
}
}

View file

@ -6,11 +6,11 @@
#include "jemalloc/internal/tsd_types.h"
typedef enum {
dss_prec_disabled = 0,
dss_prec_primary = 1,
dss_prec_disabled = 0,
dss_prec_primary = 1,
dss_prec_secondary = 2,
dss_prec_limit = 3
dss_prec_limit = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
@ -20,11 +20,11 @@ extern const char *const dss_prec_names[];
extern const char *opt_dss;
dss_prec_t extent_dss_prec_get(void);
bool extent_dss_prec_set(dss_prec_t dss_prec);
void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit);
bool extent_in_dss(void *addr);
bool extent_dss_mergeable(void *addr_a, void *addr_b);
void extent_dss_boot(void);
bool extent_dss_prec_set(dss_prec_t dss_prec);
void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit);
bool extent_in_dss(void *addr);
bool extent_dss_mergeable(void *addr_a, void *addr_b);
void extent_dss_boot(void);
#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */

View file

@ -5,8 +5,8 @@
extern bool opt_retain;
void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
void *extent_alloc_mmap(
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
bool extent_dalloc_mmap(void *addr, size_t size);
#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */

View file

@ -15,8 +15,8 @@
typedef unsigned long fb_group_t;
#define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3))
#define FB_NGROUPS(nbits) ((nbits) / FB_GROUP_BITS \
+ ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1))
#define FB_NGROUPS(nbits) \
((nbits) / FB_GROUP_BITS + ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1))
static inline void
fb_init(fb_group_t *fb, size_t nbits) {
@ -75,7 +75,6 @@ fb_unset(fb_group_t *fb, size_t nbits, size_t bit) {
fb[group_ind] &= ~((fb_group_t)1 << bit_ind);
}
/*
* Some implementation details. This visitation function lets us apply a group
* visitor to each group in the bitmap (potentially modifying it). The mask
@ -94,7 +93,8 @@ fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx,
* to from bit 0.
*/
size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS
? FB_GROUP_BITS - start_bit_ind : cnt);
? FB_GROUP_BITS - start_bit_ind
: cnt);
/*
* We can basically split affected words into:
* - The first group, where we touch only the high bits
@ -104,8 +104,8 @@ fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx,
* this can lead to bad codegen for those middle words.
*/
/* First group */
fb_group_t mask = ((~(fb_group_t)0)
>> (FB_GROUP_BITS - first_group_cnt))
fb_group_t mask =
((~(fb_group_t)0) >> (FB_GROUP_BITS - first_group_cnt))
<< start_bit_ind;
visit(ctx, &fb[group_ind], mask);
@ -176,12 +176,12 @@ fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
* Returns the number of bits in the bitmap if no such bit exists.
*/
JEMALLOC_ALWAYS_INLINE ssize_t
fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val,
bool forward) {
fb_find_impl(
fb_group_t *fb, size_t nbits, size_t start, bool val, bool forward) {
assert(start < nbits);
size_t ngroups = FB_NGROUPS(nbits);
size_t ngroups = FB_NGROUPS(nbits);
ssize_t group_ind = start / FB_GROUP_BITS;
size_t bit_ind = start % FB_GROUP_BITS;
size_t bit_ind = start % FB_GROUP_BITS;
fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1);
@ -265,8 +265,8 @@ fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
return false;
}
/* Half open range; the set bits are [begin, end). */
ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val,
forward);
ssize_t next_range_end = fb_find_impl(
fb, nbits, next_range_begin, !val, forward);
if (forward) {
*r_begin = next_range_begin;
*r_len = next_range_end - next_range_begin;
@ -324,8 +324,9 @@ fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) {
size_t begin = 0;
size_t longest_len = 0;
size_t len = 0;
while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin,
&len, val, /* forward */ true)) {
while (begin < nbits
&& fb_iter_range_impl(
fb, nbits, begin, &begin, &len, val, /* forward */ true)) {
if (len > longest_len) {
longest_len = len;
}

View file

@ -89,7 +89,7 @@ fxp_round_down(fxp_t a) {
static inline uint32_t
fxp_round_nearest(fxp_t a) {
uint32_t fractional_part = (a & ((1U << 16) - 1));
uint32_t fractional_part = (a & ((1U << 16) - 1));
uint32_t increment = (uint32_t)(fractional_part >= (1U << 15));
return (a >> 16) + increment;
}

View file

@ -25,7 +25,7 @@ hash_rotl_64(uint64_t x, int8_t r) {
static inline uint32_t
hash_get_block_32(const uint32_t *p, int i) {
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
if (unlikely((uintptr_t)p & (sizeof(uint32_t) - 1)) != 0) {
uint32_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
@ -38,7 +38,7 @@ hash_get_block_32(const uint32_t *p, int i) {
static inline uint64_t
hash_get_block_64(const uint64_t *p, int i) {
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
if (unlikely((uintptr_t)p & (sizeof(uint64_t) - 1)) != 0) {
uint64_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
@ -72,8 +72,8 @@ hash_fmix_64(uint64_t k) {
static inline uint32_t
hash_x86_32(const void *key, int len, uint32_t seed) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
const uint8_t *data = (const uint8_t *)key;
const int nblocks = len / 4;
uint32_t h1 = seed;
@ -82,8 +82,8 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
int i;
const uint32_t *blocks = (const uint32_t *)(data + nblocks * 4);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i);
@ -94,21 +94,29 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
h1 ^= k1;
h1 = hash_rotl_32(h1, 13);
h1 = h1*5 + 0xe6546b64;
h1 = h1 * 5 + 0xe6546b64;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
uint32_t k1 = 0;
switch (len & 3) {
case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH;
case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH;
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1;
case 3:
k1 ^= tail[2] << 16;
JEMALLOC_FALLTHROUGH;
case 2:
k1 ^= tail[1] << 8;
JEMALLOC_FALLTHROUGH;
case 1:
k1 ^= tail[0];
k1 *= c1;
k1 = hash_rotl_32(k1, 15);
k1 *= c2;
h1 ^= k1;
}
}
@ -121,10 +129,9 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
}
static inline void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]) {
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]) {
const uint8_t *data = (const uint8_t *)key;
const int nblocks = len / 16;
uint32_t h1 = seed;
uint32_t h2 = seed;
@ -138,95 +145,161 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
int i;
const uint32_t *blocks = (const uint32_t *)(data
+ nblocks * 16);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
uint32_t k1 = hash_get_block_32(blocks, i * 4 + 0);
uint32_t k2 = hash_get_block_32(blocks, i * 4 + 1);
uint32_t k3 = hash_get_block_32(blocks, i * 4 + 2);
uint32_t k4 = hash_get_block_32(blocks, i * 4 + 3);
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
k1 *= c1;
k1 = hash_rotl_32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = hash_rotl_32(h1, 19); h1 += h2;
h1 = h1*5 + 0x561ccd1b;
h1 = hash_rotl_32(h1, 19);
h1 += h2;
h1 = h1 * 5 + 0x561ccd1b;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
k2 *= c2;
k2 = hash_rotl_32(k2, 16);
k2 *= c3;
h2 ^= k2;
h2 = hash_rotl_32(h2, 17); h2 += h3;
h2 = h2*5 + 0x0bcaa747;
h2 = hash_rotl_32(h2, 17);
h2 += h3;
h2 = h2 * 5 + 0x0bcaa747;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
k3 *= c3;
k3 = hash_rotl_32(k3, 17);
k3 *= c4;
h3 ^= k3;
h3 = hash_rotl_32(h3, 15); h3 += h4;
h3 = h3*5 + 0x96cd1c35;
h3 = hash_rotl_32(h3, 15);
h3 += h4;
h3 = h3 * 5 + 0x96cd1c35;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
k4 *= c4;
k4 = hash_rotl_32(k4, 18);
k4 *= c1;
h4 ^= k4;
h4 = hash_rotl_32(h4, 13); h4 += h1;
h4 = h4*5 + 0x32ac3b17;
h4 = hash_rotl_32(h4, 13);
h4 += h1;
h4 = h4 * 5 + 0x32ac3b17;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
const uint8_t *tail = (const uint8_t *)(data + nblocks * 16);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
switch (len & 15) {
case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH;
case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH;
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
case 15:
k4 ^= tail[14] << 16;
JEMALLOC_FALLTHROUGH;
case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH;
case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH;
case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH;
case 9: k3 ^= tail[ 8] << 0;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
case 14:
k4 ^= tail[13] << 8;
JEMALLOC_FALLTHROUGH;
case 8: k2 ^= (uint32_t) tail[ 7] << 24; JEMALLOC_FALLTHROUGH;
case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH;
case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH;
case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
case 13:
k4 ^= tail[12] << 0;
k4 *= c4;
k4 = hash_rotl_32(k4, 18);
k4 *= c1;
h4 ^= k4;
JEMALLOC_FALLTHROUGH;
case 4: k1 ^= (uint32_t) tail[ 3] << 24; JEMALLOC_FALLTHROUGH;
case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH;
case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH;
case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
case 12:
k3 ^= (uint32_t)tail[11] << 24;
JEMALLOC_FALLTHROUGH;
case 11:
k3 ^= tail[10] << 16;
JEMALLOC_FALLTHROUGH;
case 10:
k3 ^= tail[9] << 8;
JEMALLOC_FALLTHROUGH;
case 9:
k3 ^= tail[8] << 0;
k3 *= c3;
k3 = hash_rotl_32(k3, 17);
k3 *= c4;
h3 ^= k3;
JEMALLOC_FALLTHROUGH;
case 8:
k2 ^= (uint32_t)tail[7] << 24;
JEMALLOC_FALLTHROUGH;
case 7:
k2 ^= tail[6] << 16;
JEMALLOC_FALLTHROUGH;
case 6:
k2 ^= tail[5] << 8;
JEMALLOC_FALLTHROUGH;
case 5:
k2 ^= tail[4] << 0;
k2 *= c2;
k2 = hash_rotl_32(k2, 16);
k2 *= c3;
h2 ^= k2;
JEMALLOC_FALLTHROUGH;
case 4:
k1 ^= (uint32_t)tail[3] << 24;
JEMALLOC_FALLTHROUGH;
case 3:
k1 ^= tail[2] << 16;
JEMALLOC_FALLTHROUGH;
case 2:
k1 ^= tail[1] << 8;
JEMALLOC_FALLTHROUGH;
case 1:
k1 ^= tail[0] << 0;
k1 *= c1;
k1 = hash_rotl_32(k1, 15);
k1 *= c2;
h1 ^= k1;
break;
}
}
/* finalization */
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
h1 ^= len;
h2 ^= len;
h3 ^= len;
h4 ^= len;
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
h1 += h2;
h1 += h3;
h1 += h4;
h2 += h1;
h3 += h1;
h4 += h1;
h1 = hash_fmix_32(h1);
h2 = hash_fmix_32(h2);
h3 = hash_fmix_32(h3);
h4 = hash_fmix_32(h4);
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
h1 += h2;
h1 += h3;
h1 += h4;
h2 += h1;
h3 += h1;
h4 += h1;
r_out[0] = (((uint64_t) h2) << 32) | h1;
r_out[1] = (((uint64_t) h4) << 32) | h3;
r_out[0] = (((uint64_t)h2) << 32) | h1;
r_out[1] = (((uint64_t)h4) << 32) | h3;
}
static inline void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
hash_x64_128(
const void *key, const int len, const uint32_t seed, uint64_t r_out[2]) {
const uint8_t *data = (const uint8_t *)key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
@ -236,56 +309,99 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
/* body */
{
const uint64_t *blocks = (const uint64_t *) (data);
int i;
const uint64_t *blocks = (const uint64_t *)(data);
int i;
for (i = 0; i < nblocks; i++) {
uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
uint64_t k1 = hash_get_block_64(blocks, i * 2 + 0);
uint64_t k2 = hash_get_block_64(blocks, i * 2 + 1);
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
k1 *= c1;
k1 = hash_rotl_64(k1, 31);
k1 *= c2;
h1 ^= k1;
h1 = hash_rotl_64(h1, 27); h1 += h2;
h1 = h1*5 + 0x52dce729;
h1 = hash_rotl_64(h1, 27);
h1 += h2;
h1 = h1 * 5 + 0x52dce729;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
k2 *= c2;
k2 = hash_rotl_64(k2, 33);
k2 *= c1;
h2 ^= k2;
h2 = hash_rotl_64(h2, 31); h2 += h1;
h2 = h2*5 + 0x38495ab5;
h2 = hash_rotl_64(h2, 31);
h2 += h1;
h2 = h2 * 5 + 0x38495ab5;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
uint64_t k1 = 0;
uint64_t k2 = 0;
const uint8_t *tail = (const uint8_t *)(data + nblocks * 16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH;
case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH;
case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH;
case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH;
case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH;
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH;
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
case 15:
k2 ^= ((uint64_t)(tail[14])) << 48;
JEMALLOC_FALLTHROUGH;
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH;
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH;
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH;
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH;
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH;
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH;
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH;
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
case 14:
k2 ^= ((uint64_t)(tail[13])) << 40;
JEMALLOC_FALLTHROUGH;
case 13:
k2 ^= ((uint64_t)(tail[12])) << 32;
JEMALLOC_FALLTHROUGH;
case 12:
k2 ^= ((uint64_t)(tail[11])) << 24;
JEMALLOC_FALLTHROUGH;
case 11:
k2 ^= ((uint64_t)(tail[10])) << 16;
JEMALLOC_FALLTHROUGH;
case 10:
k2 ^= ((uint64_t)(tail[9])) << 8;
JEMALLOC_FALLTHROUGH;
case 9:
k2 ^= ((uint64_t)(tail[8])) << 0;
k2 *= c2;
k2 = hash_rotl_64(k2, 33);
k2 *= c1;
h2 ^= k2;
JEMALLOC_FALLTHROUGH;
case 8:
k1 ^= ((uint64_t)(tail[7])) << 56;
JEMALLOC_FALLTHROUGH;
case 7:
k1 ^= ((uint64_t)(tail[6])) << 48;
JEMALLOC_FALLTHROUGH;
case 6:
k1 ^= ((uint64_t)(tail[5])) << 40;
JEMALLOC_FALLTHROUGH;
case 5:
k1 ^= ((uint64_t)(tail[4])) << 32;
JEMALLOC_FALLTHROUGH;
case 4:
k1 ^= ((uint64_t)(tail[3])) << 24;
JEMALLOC_FALLTHROUGH;
case 3:
k1 ^= ((uint64_t)(tail[2])) << 16;
JEMALLOC_FALLTHROUGH;
case 2:
k1 ^= ((uint64_t)(tail[1])) << 8;
JEMALLOC_FALLTHROUGH;
case 1:
k1 ^= ((uint64_t)(tail[0])) << 0;
k1 *= c1;
k1 = hash_rotl_64(k1, 31);
k1 *= c2;
h1 ^= k1;
break;
}
}
/* finalization */
h1 ^= len; h2 ^= len;
h1 ^= len;
h2 ^= len;
h1 += h2;
h2 += h1;

View file

@ -83,7 +83,6 @@ enum hook_dalloc_e {
};
typedef enum hook_dalloc_e hook_dalloc_t;
enum hook_expand_e {
hook_expand_realloc,
hook_expand_rallocx,
@ -91,23 +90,22 @@ enum hook_expand_e {
};
typedef enum hook_expand_e hook_expand_t;
typedef void (*hook_alloc)(
void *extra, hook_alloc_t type, void *result, uintptr_t result_raw,
uintptr_t args_raw[3]);
typedef void (*hook_alloc)(void *extra, hook_alloc_t type, void *result,
uintptr_t result_raw, uintptr_t args_raw[3]);
typedef void (*hook_dalloc)(
void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
typedef void (*hook_expand)(
void *extra, hook_expand_t type, void *address, size_t old_usize,
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
typedef void (*hook_expand)(void *extra, hook_expand_t type, void *address,
size_t old_usize, size_t new_usize, uintptr_t result_raw,
uintptr_t args_raw[4]);
typedef struct hooks_s hooks_t;
struct hooks_s {
hook_alloc alloc_hook;
hook_alloc alloc_hook;
hook_dalloc dalloc_hook;
hook_expand expand_hook;
void *extra;
void *extra;
};
/*
@ -156,8 +154,8 @@ void hook_remove(tsdn_t *tsdn, void *opaque);
void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
uintptr_t args_raw[3]);
void hook_invoke_dalloc(hook_dalloc_t type, void *address,
uintptr_t args_raw[3]);
void hook_invoke_dalloc(
hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);

View file

@ -27,7 +27,7 @@ struct hpa_central_s {
*
* Guarded by grow_mtx.
*/
void *eden;
void *eden;
size_t eden_len;
/* Source for metadata. */
base_t *base;
@ -78,7 +78,7 @@ struct hpa_shard_nonderived_stats_s {
/* Completely derived; only used by CTL. */
typedef struct hpa_shard_stats_s hpa_shard_stats_t;
struct hpa_shard_stats_s {
psset_stats_t psset_stats;
psset_stats_t psset_stats;
hpa_shard_nonderived_stats_t nonderived_stats;
};
@ -156,14 +156,15 @@ bool hpa_hugepage_size_exceeds_limit(void);
* just that it can function properly given the system it's running on.
*/
bool hpa_supported(void);
bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
bool hpa_central_init(
hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
base_t *base, edata_cache_t *edata_cache, unsigned ind,
const hpa_shard_opts_t *opts);
void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
hpa_shard_stats_t *dst);
void hpa_shard_stats_merge(
tsdn_t *tsdn, hpa_shard_t *shard, hpa_shard_stats_t *dst);
/*
* Notify the shard that we won't use it for allocations much longer. Due to
@ -173,8 +174,8 @@ void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
bool deferral_allowed);
void hpa_shard_set_deferral_allowed(
tsdn_t *tsdn, hpa_shard_t *shard, bool deferral_allowed);
void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);
/*

View file

@ -13,7 +13,7 @@ struct hpa_hooks_s {
void (*dehugify)(void *ptr, size_t size);
void (*curtime)(nstime_t *r_time, bool first_reading);
uint64_t (*ms_since)(nstime_t *r_time);
bool (*vectorized_purge)(void* vec, size_t vlen, size_t nbytes);
bool (*vectorized_purge)(void *vec, size_t vlen, size_t nbytes);
};
extern const hpa_hooks_t hpa_hooks_default;

View file

@ -8,26 +8,27 @@
typedef struct iovec hpa_io_vector_t;
#else
typedef struct {
void *iov_base;
size_t iov_len;
void *iov_base;
size_t iov_len;
} hpa_io_vector_t;
#endif
/* Actually invoke hooks. If we fail vectorized, use single purges */
static void
hpa_try_vectorized_purge(
hpa_shard_t *shard, hpa_io_vector_t *vec, size_t vlen, size_t nbytes) {
bool success = opt_process_madvise_max_batch > 0
&& !shard->central->hooks.vectorized_purge(vec, vlen, nbytes);
if (!success) {
/* On failure, it is safe to purge again (potential perf
hpa_shard_t *shard, hpa_io_vector_t *vec, size_t vlen, size_t nbytes) {
bool success = opt_process_madvise_max_batch > 0
&& !shard->central->hooks.vectorized_purge(vec, vlen, nbytes);
if (!success) {
/* On failure, it is safe to purge again (potential perf
* penalty) If kernel can tell exactly which regions
* failed, we could avoid that penalty.
*/
for (size_t i = 0; i < vlen; ++i) {
shard->central->hooks.purge(vec[i].iov_base, vec[i].iov_len);
}
}
for (size_t i = 0; i < vlen; ++i) {
shard->central->hooks.purge(
vec[i].iov_base, vec[i].iov_len);
}
}
}
/*
@ -35,48 +36,48 @@ hpa_try_vectorized_purge(
* It invokes the hook when batch limit is reached
*/
typedef struct {
hpa_io_vector_t *vp;
size_t cur;
size_t total_bytes;
size_t capacity;
hpa_io_vector_t *vp;
size_t cur;
size_t total_bytes;
size_t capacity;
} hpa_range_accum_t;
static inline void
hpa_range_accum_init(hpa_range_accum_t *ra, hpa_io_vector_t *v, size_t sz) {
ra->vp = v;
ra->capacity = sz;
ra->total_bytes = 0;
ra->cur = 0;
ra->vp = v;
ra->capacity = sz;
ra->total_bytes = 0;
ra->cur = 0;
}
static inline void
hpa_range_accum_flush(hpa_range_accum_t *ra, hpa_shard_t *shard) {
assert(ra->total_bytes > 0 && ra->cur > 0);
hpa_try_vectorized_purge(shard, ra->vp, ra->cur, ra->total_bytes);
ra->cur = 0;
ra->total_bytes = 0;
assert(ra->total_bytes > 0 && ra->cur > 0);
hpa_try_vectorized_purge(shard, ra->vp, ra->cur, ra->total_bytes);
ra->cur = 0;
ra->total_bytes = 0;
}
static inline void
hpa_range_accum_add(
hpa_range_accum_t *ra, void *addr, size_t sz, hpa_shard_t *shard) {
assert(ra->cur < ra->capacity);
hpa_range_accum_t *ra, void *addr, size_t sz, hpa_shard_t *shard) {
assert(ra->cur < ra->capacity);
ra->vp[ra->cur].iov_base = addr;
ra->vp[ra->cur].iov_len = sz;
ra->total_bytes += sz;
ra->cur++;
ra->vp[ra->cur].iov_base = addr;
ra->vp[ra->cur].iov_len = sz;
ra->total_bytes += sz;
ra->cur++;
if (ra->cur == ra->capacity) {
hpa_range_accum_flush(ra, shard);
}
if (ra->cur == ra->capacity) {
hpa_range_accum_flush(ra, shard);
}
}
static inline void
hpa_range_accum_finish(hpa_range_accum_t *ra, hpa_shard_t *shard) {
if (ra->cur > 0) {
hpa_range_accum_flush(ra, shard);
}
if (ra->cur > 0) {
hpa_range_accum_flush(ra, shard);
}
}
/*
@ -84,14 +85,14 @@ hpa_range_accum_finish(hpa_range_accum_t *ra, hpa_shard_t *shard) {
*/
typedef struct {
hpdata_purge_state_t state;
hpdata_t *hp;
bool dehugify;
hpdata_t *hp;
bool dehugify;
} hpa_purge_item_t;
typedef struct hpa_purge_batch_s hpa_purge_batch_t;
struct hpa_purge_batch_s {
hpa_purge_item_t *items;
size_t items_capacity;
size_t items_capacity;
/* Number of huge pages to purge in current batch */
size_t item_cnt;
/* Number of ranges to purge in current batch */

View file

@ -73,7 +73,7 @@ struct hpdata_s {
bool h_hugify_allowed;
/* When we became a hugification candidate. */
nstime_t h_time_hugify_allowed;
bool h_in_psset_hugify_container;
bool h_in_psset_hugify_container;
/* Whether or not a purge or hugify is currently happening. */
bool h_mid_purge;
@ -186,8 +186,8 @@ hpdata_purge_allowed_get(const hpdata_t *hpdata) {
static inline void
hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) {
assert(purge_allowed == false || !hpdata->h_mid_purge);
hpdata->h_purge_allowed = purge_allowed;
assert(purge_allowed == false || !hpdata->h_mid_purge);
hpdata->h_purge_allowed = purge_allowed;
}
static inline bool
@ -250,7 +250,6 @@ hpdata_changing_state_get(const hpdata_t *hpdata) {
return hpdata->h_mid_purge || hpdata->h_mid_hugify;
}
static inline bool
hpdata_updating_get(const hpdata_t *hpdata) {
return hpdata->h_updating;
@ -317,7 +316,7 @@ hpdata_assert_empty(hpdata_t *hpdata) {
*/
static inline bool
hpdata_consistent(hpdata_t *hpdata) {
if(fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES)
if (fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES)
!= hpdata_longest_free_range_get(hpdata)) {
return false;
}
@ -368,7 +367,7 @@ void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age);
* offset within that allocation.
*/
void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz);
void hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz);
void hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz);
/*
* The hpdata_purge_prepare_t allows grabbing the metadata required to purge
@ -377,10 +376,10 @@ void hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz);
*/
typedef struct hpdata_purge_state_s hpdata_purge_state_t;
struct hpdata_purge_state_s {
size_t npurged;
size_t ndirty_to_purge;
size_t npurged;
size_t ndirty_to_purge;
fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)];
size_t next_purge_search_begin;
size_t next_purge_search_begin;
};
/*
@ -398,8 +397,8 @@ struct hpdata_purge_state_s {
* Returns the number of dirty pages that will be purged and sets nranges
* to number of ranges with dirty pages that will be purged.
*/
size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
size_t *nranges);
size_t hpdata_purge_begin(
hpdata_t *hpdata, hpdata_purge_state_t *purge_state, size_t *nranges);
/*
* If there are more extents to purge, sets *r_purge_addr and *r_purge_size to

View file

@ -26,7 +26,7 @@ typedef struct inspect_extent_util_stats_verbose_s
inspect_extent_util_stats_verbose_t;
struct inspect_extent_util_stats_verbose_s {
void *slabcur_addr;
void *slabcur_addr;
size_t nfree;
size_t nregs;
size_t size;
@ -34,10 +34,10 @@ struct inspect_extent_util_stats_verbose_s {
size_t bin_nregs;
};
void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size);
void inspect_extent_util_stats_get(
tsdn_t *tsdn, const void *ptr, size_t *nfree, size_t *nregs, size_t *size);
void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size,
size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
size_t *nfree, size_t *nregs, size_t *size, size_t *bin_nfree,
size_t *bin_nregs, void **slabcur_addr);
#endif /* JEMALLOC_INTERNAL_INSPECT_H */

View file

@ -3,64 +3,65 @@
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
# include "msvc_compat/strings.h"
# ifdef _WIN64
# if LG_VADDR <= 32
# error Generate the headers using x64 vcargs
# endif
# else
# if LG_VADDR > 32
# undef LG_VADDR
# define LG_VADDR 32
# endif
# endif
# include <windows.h>
# include "msvc_compat/windows_extra.h"
# include "msvc_compat/strings.h"
# ifdef _WIN64
# if LG_VADDR <= 32
# error Generate the headers using x64 vcargs
# endif
# else
# if LG_VADDR > 32
# undef LG_VADDR
# define LG_VADDR 32
# endif
# endif
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# if defined(SYS_open) && defined(__aarch64__)
/* Android headers may define SYS_open to __NR_open even though
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# if defined(SYS_open) && defined(__aarch64__)
/* Android headers may define SYS_open to __NR_open even though
* __NR_open may not exist on AArch64 (superseded by __NR_openat). */
# undef SYS_open
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# if defined(__FreeBSD__) || defined(__DragonFly__) || defined(__OpenBSD__)
# include <pthread_np.h>
# include <sched.h>
# if defined(__FreeBSD__)
# define cpu_set_t cpuset_t
# endif
# endif
# include <signal.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
# undef SYS_open
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# if defined(__FreeBSD__) || defined(__DragonFly__) \
|| defined(__OpenBSD__)
# include <pthread_np.h>
# include <sched.h>
# if defined(__FreeBSD__)
# define cpu_set_t cpuset_t
# endif
# endif
# include <signal.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#ifndef SSIZE_MAX
# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
#endif
#include <stdarg.h>
#include <stdbool.h>
@ -69,30 +70,30 @@
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
# define offsetof(type, member) ((size_t) & (((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
# pragma warning(disable : 4996)
# if _MSC_VER < 1800
static int
isblank(int c) {
return (c == '\t' || c == ' ');
}
#endif
# endif
#else
# include <unistd.h>
# include <unistd.h>
#endif
#include <fcntl.h>
@ -102,7 +103,7 @@ isblank(int c) {
* classes.
*/
#ifdef small
# undef small
# undef small
#endif
/*

View file

@ -12,34 +12,34 @@
extern bool malloc_slow;
/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
extern bool opt_trust_madvise;
extern bool opt_confirm_conf;
extern bool opt_hpa;
extern bool opt_abort;
extern bool opt_abort_conf;
extern bool opt_trust_madvise;
extern bool opt_confirm_conf;
extern bool opt_hpa;
extern hpa_shard_opts_t opt_hpa_opts;
extern sec_opts_t opt_hpa_sec_opts;
extern sec_opts_t opt_hpa_sec_opts;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern void (*JET_MUTABLE junk_free_callback)(void *ptr, size_t size);
extern void (*JET_MUTABLE junk_alloc_callback)(void *ptr, size_t size);
extern void (*JET_MUTABLE invalid_conf_abort)(void);
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_experimental_infallible_new;
extern bool opt_experimental_tcache_gc;
extern bool opt_zero;
extern unsigned opt_narenas;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_experimental_infallible_new;
extern bool opt_experimental_tcache_gc;
extern bool opt_zero;
extern unsigned opt_narenas;
extern zero_realloc_action_t opt_zero_realloc_action;
extern malloc_init_t malloc_init_state;
extern const char *const zero_realloc_mode_names[];
extern atomic_zu_t zero_realloc_count;
extern bool opt_cache_oblivious;
extern unsigned opt_debug_double_free_max_scan;
extern size_t opt_calloc_madvise_threshold;
extern bool opt_disable_large_size_classes;
extern malloc_init_t malloc_init_state;
extern const char *const zero_realloc_mode_names[];
extern atomic_zu_t zero_realloc_count;
extern bool opt_cache_oblivious;
extern unsigned opt_debug_double_free_max_scan;
extern size_t opt_calloc_madvise_threshold;
extern bool opt_disable_large_size_classes;
extern const char *opt_malloc_conf_symlink;
extern const char *opt_malloc_conf_env_var;
@ -64,24 +64,24 @@ extern atomic_p_t arenas[];
extern unsigned huge_arena_ind;
void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
void sdallocx_default(void *ptr, size_t size, int flags);
void free_default(void *ptr);
void *malloc_default(size_t size);
void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
void sdallocx_default(void *ptr, size_t size, int flags);
void free_default(void *ptr);
void *malloc_default(size_t size);
#endif /* JEMALLOC_INTERNAL_EXTERNS_H */

View file

@ -20,12 +20,12 @@ malloc_getcpu(void) {
return (malloc_cpuid_t)sched_getcpu();
#elif defined(JEMALLOC_HAVE_RDTSCP)
unsigned int ecx;
asm volatile("rdtscp" : "=c" (ecx) :: "eax", "edx");
asm volatile("rdtscp" : "=c"(ecx)::"eax", "edx");
return (malloc_cpuid_t)(ecx & 0xfff);
#elif defined(__aarch64__) && defined(__APPLE__)
/* Other oses most likely use tpidr_el0 instead */
uintptr_t c;
asm volatile("mrs %x0, tpidrro_el0" : "=r"(c) :: "memory");
asm volatile("mrs %x0, tpidrro_el0" : "=r"(c)::"memory");
return (malloc_cpuid_t)(c & (1 << 3) - 1);
#else
not_reached();
@ -42,8 +42,8 @@ percpu_arena_choose(void) {
assert(cpuid >= 0);
unsigned arena_ind;
if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
2)) {
if ((opt_percpu_arena == percpu_arena)
|| ((unsigned)cpuid < ncpus / 2)) {
arena_ind = cpuid;
} else {
assert(opt_percpu_arena == per_phycpu_arena);

View file

@ -24,13 +24,12 @@ percpu_arena_update(tsd_t *tsd, unsigned cpu) {
if (tcache != NULL) {
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
assert(tcache_slow->arena != NULL);
tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
tcache, newarena);
tcache_arena_reassociate(
tsd_tsdn(tsd), tcache_slow, tcache, newarena);
}
}
}
/* Choose an arena based on a per-thread value. */
static inline arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
@ -51,18 +50,18 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
assert(ret);
if (tcache_available(tsd)) {
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
tcache_t *tcache = tsd_tcachep_get(tsd);
tcache_t *tcache = tsd_tcachep_get(tsd);
if (tcache_slow->arena != NULL) {
/* See comments in tsd_tcache_data_init().*/
assert(tcache_slow->arena ==
arena_get(tsd_tsdn(tsd), 0, false));
assert(tcache_slow->arena
== arena_get(tsd_tsdn(tsd), 0, false));
if (tcache_slow->arena != ret) {
tcache_arena_reassociate(tsd_tsdn(tsd),
tcache_slow, tcache, ret);
}
} else {
tcache_arena_associate(tsd_tsdn(tsd),
tcache_slow, tcache, ret);
tcache_arena_associate(
tsd_tsdn(tsd), tcache_slow, tcache, ret);
}
}
}
@ -72,10 +71,10 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
* auto percpu arena range, (i.e. thread is assigned to a manually
* managed arena), then percpu arena is skipped.
*/
if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
!internal && (arena_ind_get(ret) <
percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
tsd_tsdn(tsd))) {
if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)
&& !internal
&& (arena_ind_get(ret) < percpu_arena_ind_limit(opt_percpu_arena))
&& (ret->last_thd != tsd_tsdn(tsd))) {
unsigned ind = percpu_arena_choose();
if (arena_ind_get(ret) != ind) {
percpu_arena_update(tsd, ind);

View file

@ -63,11 +63,12 @@ iallocztm_explicit_slab(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_is_auto(arena));
if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
}
ret = arena_malloc(tsdn, arena, size, ind, zero, slab, tcache, slow_path);
ret = arena_malloc(
tsdn, arena, size, ind, zero, slab, tcache, slow_path);
if (config_stats && is_internal && likely(ret != NULL)) {
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
}
@ -78,8 +79,8 @@ JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
bool is_internal, arena_t *arena, bool slow_path) {
bool slab = sz_can_use_slab(size);
return iallocztm_explicit_slab(tsdn, size, ind, zero, slab, tcache,
is_internal, arena, slow_path);
return iallocztm_explicit_slab(
tsdn, size, ind, zero, slab, tcache, is_internal, arena, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
@ -89,8 +90,8 @@ ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
}
JEMALLOC_ALWAYS_INLINE void *
ipallocztm_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
bool slab, tcache_t *tcache, bool is_internal, arena_t *arena) {
ipallocztm_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment,
bool zero, bool slab, tcache_t *tcache, bool is_internal, arena_t *arena) {
void *ret;
assert(!slab || sz_can_use_slab(usize)); /* slab && large is illegal */
@ -98,8 +99,8 @@ ipallocztm_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero
assert(usize == sz_sa2u(usize, alignment));
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_is_auto(arena));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
ret = arena_palloc(tsdn, arena, usize, alignment, zero, slab, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@ -123,10 +124,10 @@ ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
}
JEMALLOC_ALWAYS_INLINE void *
ipalloct_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment,
bool zero, bool slab, tcache_t *tcache, arena_t *arena) {
return ipallocztm_explicit_slab(tsdn, usize, alignment, zero, slab,
tcache, false, arena);
ipalloct_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
bool slab, tcache_t *tcache, arena_t *arena) {
return ipallocztm_explicit_slab(
tsdn, usize, alignment, zero, slab, tcache, false, arena);
}
JEMALLOC_ALWAYS_INLINE void *
@ -146,13 +147,13 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
assert(ptr != NULL);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
if (config_stats && is_internal) {
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
}
if (!is_internal && !tsdn_null(tsdn) &&
tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
if (!is_internal && !tsdn_null(tsdn)
&& tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
assert(tcache == NULL);
}
arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
@ -166,8 +167,8 @@ idalloc(tsd_t *tsd, void *ptr) {
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
emap_alloc_ctx_t *alloc_ctx, bool slow_path) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
}
@ -175,17 +176,17 @@ JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, bool slab, tcache_t *tcache, arena_t *arena,
hook_ralloc_args_t *hook_args) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
void *p;
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
void *p;
size_t usize, copysize;
usize = sz_sa2u(size, alignment);
if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
return NULL;
}
p = ipalloct_explicit_slab(tsdn, usize, alignment, zero, slab,
tcache, arena);
p = ipalloct_explicit_slab(
tsdn, usize, alignment, zero, slab, tcache, arena);
if (p == NULL) {
return NULL;
}
@ -195,11 +196,12 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
hook_invoke_alloc(hook_args->is_realloc
? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p,
hook_args->args);
hook_invoke_dalloc(hook_args->is_realloc
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
hook_invoke_alloc(
hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx, p,
(uintptr_t)p, hook_args->args);
hook_invoke_dalloc(
hook_args->is_realloc ? hook_dalloc_realloc : hook_dalloc_rallocx,
ptr, hook_args->args);
isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
return p;
}
@ -214,15 +216,14 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
JEMALLOC_ALWAYS_INLINE void *
iralloct_explicit_slab(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, bool slab, tcache_t *tcache, arena_t *arena,
hook_ralloc_args_t *hook_args)
{
hook_ralloc_args_t *hook_args) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
if (alignment != 0
&& ((uintptr_t)ptr & ((uintptr_t)alignment - 1)) != 0) {
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
@ -238,8 +239,7 @@ iralloct_explicit_slab(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
hook_ralloc_args_t *hook_args)
{
hook_ralloc_args_t *hook_args) {
bool slab = sz_can_use_slab(usize);
return iralloct_explicit_slab(tsdn, ptr, oldsize, size, alignment, zero,
slab, tcache, arena, hook_args);
@ -257,23 +257,23 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, size_t *newsize) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
if (alignment != 0
&& ((uintptr_t)ptr & ((uintptr_t)alignment - 1)) != 0) {
/* Existing object alignment is inadequate. */
*newsize = oldsize;
return true;
}
return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero,
newsize);
return arena_ralloc_no_move(
tsdn, ptr, oldsize, size, extra, zero, newsize);
}
JEMALLOC_ALWAYS_INLINE void
fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after,
cache_bin_t *bin, void *ret) {
fastpath_success_finish(
tsd_t *tsd, uint64_t allocated_after, cache_bin_t *bin, void *ret) {
thread_allocated_set(tsd, allocated_after);
if (config_stats) {
bin->tstats.nrequests++;
@ -331,8 +331,8 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
sz_size2index_usize_fastpath(size, &ind, &usize);
/* Fast path relies on size being a bin. */
assert(ind < SC_NBINS);
assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) &&
(size <= SC_SMALL_MAXCLASS));
assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS)
&& (size <= SC_SMALL_MAXCLASS));
uint64_t allocated, threshold;
te_malloc_fastpath_ctx(tsd, &allocated, &threshold);
@ -363,7 +363,7 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
cache_bin_t *bin = &tcache->bins[ind];
/* Suppress spurious warning from static analysis */
assert(bin != NULL);
bool tcache_success;
bool tcache_success;
void *ret;
/*
@ -388,56 +388,56 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get_from_ind(tsd_t *tsd, unsigned tcache_ind, bool slow, bool is_alloc) {
tcache_t *tcache;
if (tcache_ind == TCACHE_IND_AUTOMATIC) {
if (likely(!slow)) {
/* Getting tcache ptr unconditionally. */
tcache = tsd_tcachep_get(tsd);
assert(tcache == tcache_get(tsd));
} else if (is_alloc ||
likely(tsd_reentrancy_level_get(tsd) == 0)) {
tcache = tcache_get(tsd);
} else {
tcache = NULL;
}
} else {
/*
tcache_t *tcache;
if (tcache_ind == TCACHE_IND_AUTOMATIC) {
if (likely(!slow)) {
/* Getting tcache ptr unconditionally. */
tcache = tsd_tcachep_get(tsd);
assert(tcache == tcache_get(tsd));
} else if (is_alloc
|| likely(tsd_reentrancy_level_get(tsd) == 0)) {
tcache = tcache_get(tsd);
} else {
tcache = NULL;
}
} else {
/*
* Should not specify tcache on deallocation path when being
* reentrant.
*/
assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0 ||
tsd_state_nocleanup(tsd));
if (tcache_ind == TCACHE_IND_NONE) {
tcache = NULL;
} else {
tcache = tcaches_get(tsd, tcache_ind);
}
}
return tcache;
assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0
|| tsd_state_nocleanup(tsd));
if (tcache_ind == TCACHE_IND_NONE) {
tcache = NULL;
} else {
tcache = tcaches_get(tsd, tcache_ind);
}
}
return tcache;
}
JEMALLOC_ALWAYS_INLINE bool
maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) {
if (config_opt_size_checks) {
emap_alloc_ctx_t dbg_ctx;
emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
&dbg_ctx);
if (alloc_ctx->szind != dbg_ctx.szind) {
safety_check_fail_sized_dealloc(
/* current_dealloc */ true, ptr,
/* true_size */ emap_alloc_ctx_usize_get(&dbg_ctx),
/* input_size */ emap_alloc_ctx_usize_get(
alloc_ctx));
return true;
}
if (alloc_ctx->slab != dbg_ctx.slab) {
safety_check_fail(
"Internal heap corruption detected: "
"mismatch in slab bit");
return true;
}
}
return false;
if (config_opt_size_checks) {
emap_alloc_ctx_t dbg_ctx;
emap_alloc_ctx_lookup(
tsd_tsdn(tsd), &arena_emap_global, ptr, &dbg_ctx);
if (alloc_ctx->szind != dbg_ctx.szind) {
safety_check_fail_sized_dealloc(
/* current_dealloc */ true, ptr,
/* true_size */ emap_alloc_ctx_usize_get(&dbg_ctx),
/* input_size */
emap_alloc_ctx_usize_get(alloc_ctx));
return true;
}
if (alloc_ctx->slab != dbg_ctx.slab) {
safety_check_fail(
"Internal heap corruption detected: "
"mismatch in slab bit");
return true;
}
}
return false;
}
JEMALLOC_ALWAYS_INLINE bool
@ -447,7 +447,7 @@ prof_sample_aligned(const void *ptr) {
JEMALLOC_ALWAYS_INLINE bool
free_fastpath_nonfast_aligned(void *ptr, bool check_prof) {
/*
/*
* free_fastpath do not handle two uncommon cases: 1) sampled profiled
* objects and 2) sampled junk & stash for use-after-free detection.
* Both have special alignments which are used to escape the fastpath.
@ -456,144 +456,145 @@ free_fastpath_nonfast_aligned(void *ptr, bool check_prof) {
* are enabled (the assertion below). Avoiding redundant checks since
* this is on the fastpath -- at most one runtime branch from this.
*/
if (config_debug && cache_bin_nonfast_aligned(ptr)) {
assert(prof_sample_aligned(ptr));
}
if (config_debug && cache_bin_nonfast_aligned(ptr)) {
assert(prof_sample_aligned(ptr));
}
if (config_prof && check_prof) {
/* When prof is enabled, the prof_sample alignment is enough. */
if (prof_sample_aligned(ptr)) {
return true;
} else {
return false;
}
}
if (config_prof && check_prof) {
/* When prof is enabled, the prof_sample alignment is enough. */
if (prof_sample_aligned(ptr)) {
return true;
} else {
return false;
}
}
if (config_uaf_detection) {
if (cache_bin_nonfast_aligned(ptr)) {
return true;
} else {
return false;
}
}
if (config_uaf_detection) {
if (cache_bin_nonfast_aligned(ptr)) {
return true;
} else {
return false;
}
}
return false;
return false;
}
/* Returns whether or not the free attempt was successful. */
JEMALLOC_ALWAYS_INLINE
bool free_fastpath(void *ptr, size_t size, bool size_hint) {
tsd_t *tsd = tsd_get(false);
/* The branch gets optimized away unless tsd_get_allocates(). */
if (unlikely(tsd == NULL)) {
return false;
}
/*
bool
free_fastpath(void *ptr, size_t size, bool size_hint) {
tsd_t *tsd = tsd_get(false);
/* The branch gets optimized away unless tsd_get_allocates(). */
if (unlikely(tsd == NULL)) {
return false;
}
/*
* The tsd_fast() / initialized checks are folded into the branch
* testing (deallocated_after >= threshold) later in this function.
* The threshold will be set to 0 when !tsd_fast.
*/
assert(tsd_fast(tsd) ||
*tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) == 0);
assert(tsd_fast(tsd)
|| *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) == 0);
emap_alloc_ctx_t alloc_ctx JEMALLOC_CC_SILENCE_INIT({0, 0, false});
size_t usize;
if (!size_hint) {
bool err = emap_alloc_ctx_try_lookup_fast(tsd,
&arena_emap_global, ptr, &alloc_ctx);
emap_alloc_ctx_t alloc_ctx JEMALLOC_CC_SILENCE_INIT({0, 0, false});
size_t usize;
if (!size_hint) {
bool err = emap_alloc_ctx_try_lookup_fast(
tsd, &arena_emap_global, ptr, &alloc_ctx);
/* Note: profiled objects will have alloc_ctx.slab set */
if (unlikely(err || !alloc_ctx.slab ||
free_fastpath_nonfast_aligned(ptr,
/* check_prof */ false))) {
return false;
}
assert(alloc_ctx.szind != SC_NSIZES);
/* Note: profiled objects will have alloc_ctx.slab set */
if (unlikely(err || !alloc_ctx.slab
|| free_fastpath_nonfast_aligned(ptr,
/* check_prof */ false))) {
return false;
}
assert(alloc_ctx.szind != SC_NSIZES);
usize = sz_index2size(alloc_ctx.szind);
} else {
/*
} else {
/*
* Check for both sizes that are too large, and for sampled /
* special aligned objects. The alignment check will also check
* for null ptr.
*/
if (unlikely(size > SC_LOOKUP_MAXCLASS ||
free_fastpath_nonfast_aligned(ptr,
/* check_prof */ true))) {
return false;
}
if (unlikely(size > SC_LOOKUP_MAXCLASS
|| free_fastpath_nonfast_aligned(ptr,
/* check_prof */ true))) {
return false;
}
sz_size2index_usize_fastpath(size, &alloc_ctx.szind, &usize);
/* Max lookup class must be small. */
assert(alloc_ctx.szind < SC_NBINS);
/* This is a dead store, except when opt size checking is on. */
alloc_ctx.slab = true;
}
/*
/* Max lookup class must be small. */
assert(alloc_ctx.szind < SC_NBINS);
/* This is a dead store, except when opt size checking is on. */
alloc_ctx.slab = true;
}
/*
* Currently the fastpath only handles small sizes. The branch on
* SC_LOOKUP_MAXCLASS makes sure of it. This lets us avoid checking
* tcache szind upper limit (i.e. tcache_max) as well.
*/
assert(alloc_ctx.slab);
assert(alloc_ctx.slab);
uint64_t deallocated, threshold;
te_free_fastpath_ctx(tsd, &deallocated, &threshold);
uint64_t deallocated, threshold;
te_free_fastpath_ctx(tsd, &deallocated, &threshold);
uint64_t deallocated_after = deallocated + usize;
/*
uint64_t deallocated_after = deallocated + usize;
/*
* Check for events and tsd non-nominal (fast_threshold will be set to
* 0) in a single branch. Note that this handles the uninitialized case
* as well (TSD init will be triggered on the non-fastpath). Therefore
* anything depends on a functional TSD (e.g. the alloc_ctx sanity check
* below) needs to be after this branch.
*/
if (unlikely(deallocated_after >= threshold)) {
return false;
}
assert(tsd_fast(tsd));
bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
if (fail) {
/* See the comment in isfree. */
return true;
}
if (unlikely(deallocated_after >= threshold)) {
return false;
}
assert(tsd_fast(tsd));
bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
if (fail) {
/* See the comment in isfree. */
return true;
}
tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC,
/* slow */ false, /* is_alloc */ false);
cache_bin_t *bin = &tcache->bins[alloc_ctx.szind];
tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC,
/* slow */ false, /* is_alloc */ false);
cache_bin_t *bin = &tcache->bins[alloc_ctx.szind];
/*
/*
* If junking were enabled, this is where we would do it. It's not
* though, since we ensured above that we're on the fast path. Assert
* that to double-check.
*/
assert(!opt_junk_free);
assert(!opt_junk_free);
if (!cache_bin_dalloc_easy(bin, ptr)) {
return false;
}
if (!cache_bin_dalloc_easy(bin, ptr)) {
return false;
}
*tsd_thread_deallocatedp_get(tsd) = deallocated_after;
*tsd_thread_deallocatedp_get(tsd) = deallocated_after;
return true;
return true;
}
JEMALLOC_ALWAYS_INLINE void JEMALLOC_NOTHROW
je_sdallocx_noflags(void *ptr, size_t size) {
if (!free_fastpath(ptr, size, true)) {
sdallocx_default(ptr, size, 0);
}
if (!free_fastpath(ptr, size, true)) {
sdallocx_default(ptr, size, 0);
}
}
JEMALLOC_ALWAYS_INLINE void JEMALLOC_NOTHROW
je_sdallocx_impl(void *ptr, size_t size, int flags) {
if (flags != 0 || !free_fastpath(ptr, size, true)) {
sdallocx_default(ptr, size, flags);
}
if (flags != 0 || !free_fastpath(ptr, size, true)) {
sdallocx_default(ptr, size, flags);
}
}
JEMALLOC_ALWAYS_INLINE void JEMALLOC_NOTHROW
je_free_impl(void *ptr) {
if (!free_fastpath(ptr, 0, false)) {
free_default(ptr);
}
if (!free_fastpath(ptr, 0, false)) {
free_default(ptr);
}
}
#endif /* JEMALLOC_INTERNAL_INLINES_C_H */

View file

@ -2,45 +2,46 @@
#define JEMALLOC_INTERNAL_MACROS_H
#ifdef JEMALLOC_DEBUG
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE static inline
#else
# ifdef _MSC_VER
# define JEMALLOC_ALWAYS_INLINE static __forceinline
# else
# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
# endif
# ifdef _MSC_VER
# define JEMALLOC_ALWAYS_INLINE static __forceinline
# else
# define JEMALLOC_ALWAYS_INLINE \
JEMALLOC_ATTR(always_inline) static inline
# endif
#endif
#ifdef _MSC_VER
# define inline _inline
# define inline _inline
#endif
#define UNUSED JEMALLOC_ATTR(unused)
#define ZU(z) ((size_t)z)
#define ZD(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QD(q) ((int64_t)q)
#define ZU(z) ((size_t)z)
#define ZD(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QD(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZD(z) ZD(z##LL)
#define KQU(q) QU(q##ULL)
#define KQD(q) QI(q##LL)
#define KZU(z) ZU(z##ULL)
#define KZD(z) ZD(z##LL)
#define KQU(q) QU(q##ULL)
#define KQD(q) QI(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
# define restrict
# define restrict
#endif
/* Various function pointers are static and immutable except during testing. */
#ifdef JEMALLOC_JET
# define JET_MUTABLE
# define JET_EXTERN extern
# define JET_MUTABLE
# define JET_EXTERN extern
#else
# define JET_MUTABLE const
# define JET_EXTERN static
# define JET_MUTABLE const
# define JET_EXTERN static
#endif
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
@ -48,91 +49,93 @@
/* Diagnostic suppression macros */
#if defined(_MSC_VER) && !defined(__clang__)
# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W))
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable : W))
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/* #pragma GCC diagnostic first appeared in gcc 4.6. */
#elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \
(__GNUC_MINOR__ > 5)))) || defined(__clang__)
#elif (defined(__GNUC__) \
&& ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) \
|| defined(__clang__)
/*
* The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang
* diagnostic suppression macros and should not be used anywhere else.
*/
# define JEMALLOC_PRAGMA__(X) _Pragma(#X)
# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
# define JEMALLOC_DIAGNOSTIC_IGNORE(W) \
JEMALLOC_PRAGMA__(GCC diagnostic ignored W)
# define JEMALLOC_PRAGMA__(X) _Pragma(#X)
# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
# define JEMALLOC_DIAGNOSTIC_IGNORE(W) \
JEMALLOC_PRAGMA__(GCC diagnostic ignored W)
/*
* The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and
* all clang versions up to version 7 (currently trunk, unreleased). This macro
* suppresses the warning for the affected compiler versions only.
*/
# if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \
defined(__clang__)
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
# else
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
# endif
# if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) \
|| defined(__clang__)
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
JEMALLOC_DIAGNOSTIC_IGNORE( \
"-Wmissing-field-initializers")
# else
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
# endif
# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wframe-address")
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
# else
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
# endif
# ifdef JEMALLOC_HAVE_ATTR_DEPRECATED
# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wdeprecated-declarations")
# else
# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED
# endif
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
JEMALLOC_DIAGNOSTIC_PUSH \
JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wframe-address")
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
# else
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
# endif
# ifdef JEMALLOC_HAVE_ATTR_DEPRECATED
# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wdeprecated-declarations")
# else
# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED
# endif
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
JEMALLOC_DIAGNOSTIC_PUSH \
JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
#else
# define JEMALLOC_DIAGNOSTIC_PUSH
# define JEMALLOC_DIAGNOSTIC_POP
# define JEMALLOC_DIAGNOSTIC_IGNORE(W)
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
# define JEMALLOC_DIAGNOSTIC_PUSH
# define JEMALLOC_DIAGNOSTIC_POP
# define JEMALLOC_DIAGNOSTIC_IGNORE(W)
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
#endif
#ifdef __clang_analyzer__
# define JEMALLOC_CLANG_ANALYZER
# define JEMALLOC_CLANG_ANALYZER
#endif
#ifdef JEMALLOC_CLANG_ANALYZER
# define JEMALLOC_CLANG_ANALYZER_SUPPRESS __attribute__((suppress))
# define JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(v) = v
# define JEMALLOC_CLANG_ANALYZER_SUPPRESS __attribute__((suppress))
# define JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(v) = v
#else
# define JEMALLOC_CLANG_ANALYZER_SUPPRESS
# define JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(v)
# define JEMALLOC_CLANG_ANALYZER_SUPPRESS
# define JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(v)
#endif
#define JEMALLOC_SUPPRESS_WARN_ON_USAGE(...) \
JEMALLOC_DIAGNOSTIC_PUSH \
JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED \
__VA_ARGS__ \
JEMALLOC_DIAGNOSTIC_POP
#define JEMALLOC_SUPPRESS_WARN_ON_USAGE(...) \
JEMALLOC_DIAGNOSTIC_PUSH \
JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED \
__VA_ARGS__ \
JEMALLOC_DIAGNOSTIC_POP
/*
* Disables spurious diagnostics for all headers. Since these headers are not

View file

@ -9,13 +9,14 @@
*/
#ifdef JEMALLOC_OVERRIDE_LG_PAGE
#undef LG_PAGE
#define LG_PAGE JEMALLOC_OVERRIDE_LG_PAGE
# undef LG_PAGE
# define LG_PAGE JEMALLOC_OVERRIDE_LG_PAGE
#endif
#ifdef JEMALLOC_OVERRIDE_JEMALLOC_CONFIG_MALLOC_CONF
#undef JEMALLOC_CONFIG_MALLOC_CONF
#define JEMALLOC_CONFIG_MALLOC_CONF JEMALLOC_OVERRIDE_JEMALLOC_CONFIG_MALLOC_CONF
# undef JEMALLOC_CONFIG_MALLOC_CONF
# define JEMALLOC_CONFIG_MALLOC_CONF \
JEMALLOC_OVERRIDE_JEMALLOC_CONFIG_MALLOC_CONF
#endif
#endif /* JEMALLOC_INTERNAL_OVERRIDES_H */

View file

@ -18,13 +18,13 @@ enum zero_realloc_action_e {
typedef enum zero_realloc_action_e zero_realloc_action_t;
/* Signature of write callback. */
typedef void (write_cb_t)(void *, const char *);
typedef void(write_cb_t)(void *, const char *);
enum malloc_init_e {
malloc_init_uninitialized = 3,
malloc_init_a0_initialized = 2,
malloc_init_recursible = 1,
malloc_init_initialized = 0 /* Common case --> jnz. */
malloc_init_uninitialized = 3,
malloc_init_a0_initialized = 2,
malloc_init_recursible = 1,
malloc_init_initialized = 0 /* Common case --> jnz. */
};
typedef enum malloc_init_e malloc_init_t;
@ -39,48 +39,46 @@ typedef enum malloc_init_e malloc_init_t;
*
* aaaaaaaa aaaatttt tttttttt 0znnnnnn
*/
#define MALLOCX_ARENA_BITS 12
#define MALLOCX_TCACHE_BITS 12
#define MALLOCX_LG_ALIGN_BITS 6
#define MALLOCX_ARENA_SHIFT 20
#define MALLOCX_TCACHE_SHIFT 8
#define MALLOCX_ARENA_MASK \
((unsigned)(((1U << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT))
#define MALLOCX_ARENA_BITS 12
#define MALLOCX_TCACHE_BITS 12
#define MALLOCX_LG_ALIGN_BITS 6
#define MALLOCX_ARENA_SHIFT 20
#define MALLOCX_TCACHE_SHIFT 8
#define MALLOCX_ARENA_MASK \
((unsigned)(((1U << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT))
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
#define MALLOCX_ARENA_LIMIT ((unsigned)((1U << MALLOCX_ARENA_BITS) - 1))
#define MALLOCX_TCACHE_MASK \
((unsigned)(((1U << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT))
#define MALLOCX_TCACHE_MAX ((unsigned)((1U << MALLOCX_TCACHE_BITS) - 3))
#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
#define MALLOCX_ARENA_LIMIT ((unsigned)((1U << MALLOCX_ARENA_BITS) - 1))
#define MALLOCX_TCACHE_MASK \
((unsigned)(((1U << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT))
#define MALLOCX_TCACHE_MAX ((unsigned)((1U << MALLOCX_TCACHE_BITS) - 3))
#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX - 1))
#define MALLOCX_ZERO_GET(flags) ((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) \
- 2)
#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
/* Smallest size class to support. */
#define TINY_MIN (1U << LG_TINY_MIN)
#define TINY_MIN (1U << LG_TINY_MIN)
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define LONG_CEILING(a) (((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
#define PTR_CEILING(a) (((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
@ -89,25 +87,24 @@ typedef enum malloc_init_e malloc_init_t;
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
#define CACHELINE_CEILING(s) (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)(((byte_t *)(a)) - (((uintptr_t)(a)) - \
((uintptr_t)(a) & ((~(alignment)) + 1)))))
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)(((byte_t *)(a)) \
- (((uintptr_t)(a)) - ((uintptr_t)(a) & ((~(alignment)) + 1)))))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & ((~(alignment)) + 1))
/*
@ -119,30 +116,31 @@ typedef enum malloc_init_e malloc_init_t;
* provenance from the compiler. See the block-comment on the
* definition of `byte_t` for more details.
*/
#define ALIGNMENT_ADDR2CEILING(a, alignment) \
((void *)(((byte_t *)(a)) + (((((uintptr_t)(a)) + \
(alignment - 1)) & ((~(alignment)) + 1)) - ((uintptr_t)(a)))))
#define ALIGNMENT_ADDR2CEILING(a, alignment) \
((void *)(((byte_t *)(a)) \
+ (((((uintptr_t)(a)) + (alignment - 1)) & ((~(alignment)) + 1)) \
- ((uintptr_t)(a)))))
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L || defined(__STDC_NO_VLA__)
# ifdef _MSC_VER
# include <malloc.h>
# define alloca _alloca
# else
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY_UNSAFE(type, name, count) \
type *name = alloca(sizeof(type) * (count))
# ifdef _MSC_VER
# include <malloc.h>
# define alloca _alloca
# else
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY_UNSAFE(type, name, count) \
type *name = alloca(sizeof(type) * (count))
#else
# define VARIABLE_ARRAY_UNSAFE(type, name, count) type name[(count)]
# define VARIABLE_ARRAY_UNSAFE(type, name, count) type name[(count)]
#endif
#define VARIABLE_ARRAY_SIZE_MAX 2048
#define VARIABLE_ARRAY(type, name, count) \
assert(sizeof(type) * (count) <= VARIABLE_ARRAY_SIZE_MAX); \
#define VARIABLE_ARRAY_SIZE_MAX 2048
#define VARIABLE_ARRAY(type, name, count) \
assert(sizeof(type) * (count) <= VARIABLE_ARRAY_SIZE_MAX); \
VARIABLE_ARRAY_UNSAFE(type, name, count)
#define CALLOC_MADVISE_THRESHOLD_DEFAULT (((size_t)1) << 23) /* 8 MB */

View file

@ -6,20 +6,20 @@
#include "jemalloc/internal/hook.h"
void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero);
bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero);
void *large_palloc(
tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero);
bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
void large_dalloc(tsdn_t *tsdn, edata_t *edata);
void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
void large_dalloc(tsdn_t *tsdn, edata_t *edata);
size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
bool reset_recent);
void large_prof_info_get(
tsd_t *tsd, edata_t *edata, prof_info_t *prof_info, bool reset_recent);
void large_prof_tctx_reset(edata_t *edata);
void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size);

View file

@ -30,33 +30,34 @@ struct locked_zu_s {
};
#ifndef JEMALLOC_ATOMIC_U64
# define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name;
# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \
malloc_mutex_init(&(mu), name, rank, rank_mode)
# define LOCKEDINT_MTX(mtx) (&(mtx))
# define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
# define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu))
# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \
malloc_mutex_postfork_parent(tsdn, &(mu))
# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \
malloc_mutex_postfork_child(tsdn, &(mu))
# define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name;
# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \
malloc_mutex_init(&(mu), name, rank, rank_mode)
# define LOCKEDINT_MTX(mtx) (&(mtx))
# define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
# define LOCKEDINT_MTX_PREFORK(tsdn, mu) \
malloc_mutex_prefork(tsdn, &(mu))
# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \
malloc_mutex_postfork_parent(tsdn, &(mu))
# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \
malloc_mutex_postfork_child(tsdn, &(mu))
#else
# define LOCKEDINT_MTX_DECLARE(name)
# define LOCKEDINT_MTX(mtx) NULL
# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false
# define LOCKEDINT_MTX_LOCK(tsdn, mu)
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
# define LOCKEDINT_MTX_PREFORK(tsdn, mu)
# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)
# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)
# define LOCKEDINT_MTX_DECLARE(name)
# define LOCKEDINT_MTX(mtx) NULL
# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false
# define LOCKEDINT_MTX_LOCK(tsdn, mu)
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
# define LOCKEDINT_MTX_PREFORK(tsdn, mu)
# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)
# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)
#endif
#ifdef JEMALLOC_ATOMIC_U64
# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL)
# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL)
#else
# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \
malloc_mutex_assert_owner(tsdn, (mtx))
# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \
malloc_mutex_assert_owner(tsdn, (mtx))
#endif
static inline uint64_t
@ -70,8 +71,7 @@ locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) {
}
static inline void
locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
uint64_t x) {
locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, uint64_t x) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED);
@ -81,8 +81,7 @@ locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
}
static inline void
locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
uint64_t x) {
locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, uint64_t x) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED);
@ -99,7 +98,7 @@ locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
const uint64_t x, const uint64_t modulus) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
uint64_t before, after;
bool overflow;
bool overflow;
#ifdef JEMALLOC_ATOMIC_U64
before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
do {
@ -109,8 +108,8 @@ locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
if (overflow) {
after %= modulus;
}
} while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
ATOMIC_RELAXED, ATOMIC_RELAXED));
} while (!atomic_compare_exchange_weak_u64(
&p->val, &before, after, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
before = p->val;
after = before + x;
@ -167,8 +166,7 @@ locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) {
}
static inline void
locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
size_t x) {
locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, size_t x) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED);
@ -179,8 +177,7 @@ locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
}
static inline void
locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
size_t x) {
locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, size_t x) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED);

View file

@ -7,9 +7,9 @@
#include "jemalloc/internal/mutex.h"
#ifdef JEMALLOC_LOG
# define JEMALLOC_LOG_VAR_BUFSIZE 1000
# define JEMALLOC_LOG_VAR_BUFSIZE 1000
#else
# define JEMALLOC_LOG_VAR_BUFSIZE 1
# define JEMALLOC_LOG_VAR_BUFSIZE 1
#endif
#define JEMALLOC_LOG_BUFSIZE 4096
@ -36,7 +36,7 @@
* statements.
*/
extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
extern atomic_b_t log_init_done;
typedef struct log_var_s log_var_t;
@ -45,7 +45,7 @@ struct log_var_s {
* Lowest bit is "inited", second lowest is "enabled". Putting them in
* a single word lets us avoid any fences on weak architectures.
*/
atomic_u_t state;
atomic_u_t state;
const char *name;
};
@ -53,7 +53,8 @@ struct log_var_s {
#define LOG_INITIALIZED_NOT_ENABLED 1U
#define LOG_ENABLED 2U
#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
#define LOG_VAR_INIT(name_str) \
{ ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str }
/*
* Returns the value we should assume for state (which is not necessarily
@ -63,21 +64,21 @@ struct log_var_s {
unsigned log_var_update_state(log_var_t *log_var);
/* We factor out the metadata management to allow us to test more easily. */
#define log_do_begin(log_var) \
if (config_log) { \
unsigned log_state = atomic_load_u(&(log_var).state, \
ATOMIC_RELAXED); \
if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
log_state = log_var_update_state(&(log_var)); \
assert(log_state != LOG_NOT_INITIALIZED); \
} \
if (log_state == LOG_ENABLED) { \
{
/* User code executes here. */
#define log_do_end(log_var) \
} \
} \
}
#define log_do_begin(log_var) \
if (config_log) { \
unsigned log_state = atomic_load_u( \
&(log_var).state, ATOMIC_RELAXED); \
if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
log_state = log_var_update_state(&(log_var)); \
assert(log_state != LOG_NOT_INITIALIZED); \
} \
if (log_state == LOG_ENABLED) { \
{
/* User code executes here. */
#define log_do_end(log_var) \
} \
} \
}
/*
* MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
@ -88,28 +89,29 @@ if (config_log) { \
*/
static inline void
log_impl_varargs(const char *name, ...) {
char buf[JEMALLOC_LOG_BUFSIZE];
char buf[JEMALLOC_LOG_BUFSIZE];
va_list ap;
va_start(ap, name);
const char *format = va_arg(ap, const char *);
size_t dst_offset = 0;
size_t dst_offset = 0;
dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
dst_offset += malloc_vsnprintf(buf + dst_offset,
JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
malloc_snprintf(buf + dst_offset, JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
dst_offset += malloc_vsnprintf(
buf + dst_offset, JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
malloc_snprintf(
buf + dst_offset, JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
va_end(ap);
malloc_write(buf);
}
/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
#define LOG(log_var_str, ...) \
do { \
static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
log_do_begin(log_var) \
log_impl_varargs((log_var).name, __VA_ARGS__); \
log_do_end(log_var) \
} while (0)
#define LOG(log_var_str, ...) \
do { \
static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
log_do_begin(log_var) \
log_impl_varargs((log_var).name, __VA_ARGS__); \
log_do_end(log_var) \
} while (0)
#endif /* JEMALLOC_INTERNAL_LOG_H */

View file

@ -5,64 +5,63 @@
#include "jemalloc/internal/jemalloc_internal_types.h"
#ifdef _WIN32
# ifdef _WIN64
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX "ll"
# else
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX ""
# endif
# define FMTd32 "d"
# define FMTu32 "u"
# define FMTx32 "x"
# define FMTd64 FMT64_PREFIX "d"
# define FMTu64 FMT64_PREFIX "u"
# define FMTx64 FMT64_PREFIX "x"
# define FMTdPTR FMTPTR_PREFIX "d"
# define FMTuPTR FMTPTR_PREFIX "u"
# define FMTxPTR FMTPTR_PREFIX "x"
# ifdef _WIN64
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX "ll"
# else
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX ""
# endif
# define FMTd32 "d"
# define FMTu32 "u"
# define FMTx32 "x"
# define FMTd64 FMT64_PREFIX "d"
# define FMTu64 FMT64_PREFIX "u"
# define FMTx64 FMT64_PREFIX "x"
# define FMTdPTR FMTPTR_PREFIX "d"
# define FMTuPTR FMTPTR_PREFIX "u"
# define FMTxPTR FMTPTR_PREFIX "x"
#else
# include <inttypes.h>
# define FMTd32 PRId32
# define FMTu32 PRIu32
# define FMTx32 PRIx32
# define FMTd64 PRId64
# define FMTu64 PRIu64
# define FMTx64 PRIx64
# define FMTdPTR PRIdPTR
# define FMTuPTR PRIuPTR
# define FMTxPTR PRIxPTR
# include <inttypes.h>
# define FMTd32 PRId32
# define FMTu32 PRIu32
# define FMTx32 PRIx32
# define FMTd64 PRId64
# define FMTu64 PRIu64
# define FMTx64 PRIx64
# define FMTdPTR PRIdPTR
# define FMTuPTR PRIuPTR
# define FMTxPTR PRIxPTR
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
#define BUFERROR_BUF 64
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
#define MALLOC_PRINTF_BUFSIZE 4096
write_cb_t wrtmessage;
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
int base);
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(
const char *restrict nptr, char **restrict endptr, int base);
void malloc_write(const char *s);
/*
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_FORMAT_PRINTF(3, 4);
/*
* The caller can set write_cb to null to choose to print with the
* je_malloc_message hook.
*/
void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
va_list ap);
void malloc_vcprintf(
write_cb_t *write_cb, void *cbopaque, const char *format, va_list ap);
void malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
@ -81,10 +80,10 @@ malloc_write_fd_syscall(int fd, const void *buf, size_t count) {
long result = syscall(SYS_write, fd, buf, count);
#else
ssize_t result = (ssize_t)write(fd, buf,
#ifdef _WIN32
# ifdef _WIN32
(unsigned int)
#endif
count);
# endif
count);
#endif
return (ssize_t)result;
}
@ -110,10 +109,10 @@ malloc_read_fd_syscall(int fd, void *buf, size_t count) {
long result = syscall(SYS_read, fd, buf, count);
#else
ssize_t result = read(fd, buf,
#ifdef _WIN32
# ifdef _WIN32
(unsigned int)
#endif
count);
# endif
count);
#endif
return (ssize_t)result;
}
@ -122,8 +121,8 @@ static inline ssize_t
malloc_read_fd(int fd, void *buf, size_t count) {
size_t bytes_read = 0;
do {
ssize_t result = malloc_read_fd_syscall(fd,
&((byte_t *)buf)[bytes_read], count - bytes_read);
ssize_t result = malloc_read_fd_syscall(
fd, &((byte_t *)buf)[bytes_read], count - bytes_read);
if (result < 0) {
return result;
} else if (result == 0) {
@ -134,7 +133,8 @@ malloc_read_fd(int fd, void *buf, size_t count) {
return bytes_read;
}
static inline int malloc_open(const char *path, int flags) {
static inline int
malloc_open(const char *path, int flags) {
int fd;
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
fd = (int)syscall(SYS_open, path, flags);
@ -146,7 +146,8 @@ static inline int malloc_open(const char *path, int flags) {
return fd;
}
static inline int malloc_close(int fd) {
static inline int
malloc_close(int fd) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
return (int)syscall(SYS_close, fd);
#else
@ -154,11 +155,12 @@ static inline int malloc_close(int fd) {
#endif
}
static inline off_t malloc_lseek(int fd, off_t offset, int whence) {
static inline off_t
malloc_lseek(int fd, off_t offset, int whence) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_lseek)
return (off_t)syscall(SYS_lseek, fd, offset, whence);
return (off_t)syscall(SYS_lseek, fd, offset, whence);
#else
return lseek(fd, offset, whence);
return lseek(fd, offset, whence);
#endif
}

View file

@ -31,7 +31,7 @@ struct malloc_mutex_s {
* avoid prefetching a modified cacheline (for the
* unlocking thread).
*/
mutex_prof_data_t prof_data;
mutex_prof_data_t prof_data;
/*
* Hint flag to avoid exclusive cache line contention
* during spin waiting. Placed along with prof_data
@ -39,20 +39,20 @@ struct malloc_mutex_s {
* Modified by the lock owner only (after acquired, and
* before release), and may be read by other threads.
*/
atomic_b_t locked;
atomic_b_t locked;
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
# else
CRITICAL_SECTION lock;
# endif
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
# else
CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock lock;
os_unfair_lock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
pthread_mutex_t lock;
malloc_mutex_t *postponed_next;
pthread_mutex_t lock;
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
pthread_mutex_t lock;
#endif
};
/*
@ -62,82 +62,118 @@ struct malloc_mutex_s {
* memory cost.
*/
#if !defined(JEMALLOC_DEBUG)
witness_t witness;
malloc_mutex_lock_order_t lock_order;
witness_t witness;
malloc_mutex_lock_order_t lock_order;
#endif
};
#if defined(JEMALLOC_DEBUG)
witness_t witness;
malloc_mutex_lock_order_t lock_order;
witness_t witness;
malloc_mutex_lock_order_t lock_order;
#endif
};
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
# else
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
# endif
# if _WIN32_WINNT >= 0x0600
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) \
ReleaseSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) \
(!TryAcquireSRWLockExclusive(&(m)->lock))
# else
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) \
(!TryEnterCriticalSection(&(m)->lock))
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#else
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif
#define LOCK_PROF_DATA_INITIALIZER \
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
ATOMIC_INIT(0), 0, NULL, 0}
#define LOCK_PROF_DATA_INITIALIZER \
{ \
NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
ATOMIC_INIT(0), 0, NULL, 0 \
}
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# if defined(JEMALLOC_DEBUG)
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
# else
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
# endif
# if defined(JEMALLOC_DEBUG)
# define MALLOC_MUTEX_INITIALIZER \
{ \
{{LOCK_PROF_DATA_INITIALIZER, \
ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER( \
"mutex", WITNESS_RANK_OMIT), \
0 \
}
# else
# define MALLOC_MUTEX_INITIALIZER \
{ \
{{LOCK_PROF_DATA_INITIALIZER, \
ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER( \
"mutex", WITNESS_RANK_OMIT) \
}
# endif
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# if (defined(JEMALLOC_DEBUG))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
# else
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
# endif
# if (defined(JEMALLOC_DEBUG))
# define MALLOC_MUTEX_INITIALIZER \
{ \
{{LOCK_PROF_DATA_INITIALIZER, \
ATOMIC_INIT(false), \
PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER( \
"mutex", WITNESS_RANK_OMIT), \
0 \
}
# else
# define MALLOC_MUTEX_INITIALIZER \
{ \
{{LOCK_PROF_DATA_INITIALIZER, \
ATOMIC_INIT(false), \
PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER( \
"mutex", WITNESS_RANK_OMIT) \
}
# endif
#else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# if defined(JEMALLOC_DEBUG)
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
# else
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
# endif
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# if defined(JEMALLOC_DEBUG)
# define MALLOC_MUTEX_INITIALIZER \
{ \
{{LOCK_PROF_DATA_INITIALIZER, \
ATOMIC_INIT(false), \
PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER( \
"mutex", WITNESS_RANK_OMIT), \
0 \
}
# else
# define MALLOC_MUTEX_INITIALIZER \
{ \
{{LOCK_PROF_DATA_INITIALIZER, \
ATOMIC_INIT(false), \
PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER( \
"mutex", WITNESS_RANK_OMIT) \
}
# endif
#endif
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
@ -214,12 +250,12 @@ malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
if (sum->max_n_thds < data->max_n_thds) {
sum->max_n_thds = data->max_n_thds;
}
uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
ATOMIC_RELAXED);
uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
&data->n_waiting_thds, ATOMIC_RELAXED);
atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
ATOMIC_RELAXED);
uint32_t cur_n_waiting_thds = atomic_load_u32(
&sum->n_waiting_thds, ATOMIC_RELAXED);
uint32_t new_n_waiting_thds = cur_n_waiting_thds
+ atomic_load_u32(&data->n_waiting_thds, ATOMIC_RELAXED);
atomic_store_u32(
&sum->n_waiting_thds, new_n_waiting_thds, ATOMIC_RELAXED);
sum->n_owner_switches += data->n_owner_switches;
sum->n_lock_ops += data->n_lock_ops;
}
@ -274,16 +310,16 @@ malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
malloc_mutex_t *mutex) {
malloc_mutex_prof_read(
tsdn_t *tsdn, mutex_prof_data_t *data, malloc_mutex_t *mutex) {
/* Can only read holding the mutex. */
malloc_mutex_assert_owner(tsdn, mutex);
malloc_mutex_prof_copy(data, &mutex->prof_data);
}
static inline void
malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
malloc_mutex_t *mutex) {
malloc_mutex_prof_accum(
tsdn_t *tsdn, mutex_prof_data_t *data, malloc_mutex_t *mutex) {
mutex_prof_data_t *source = &mutex->prof_data;
/* Can only read holding the mutex. */
malloc_mutex_assert_owner(tsdn, mutex);
@ -305,8 +341,8 @@ malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
/* Compare the prof data and update to the maximum. */
static inline void
malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
malloc_mutex_t *mutex) {
malloc_mutex_prof_max_update(
tsdn_t *tsdn, mutex_prof_data_t *data, malloc_mutex_t *mutex) {
mutex_prof_data_t *source = &mutex->prof_data;
/* Can only read holding the mutex. */
malloc_mutex_assert_owner(tsdn, mutex);

View file

@ -6,76 +6,76 @@
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/tsd_types.h"
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(background_thread) \
OP(max_per_bg_thd) \
OP(ctl) \
OP(prof) \
OP(prof_thds_data) \
OP(prof_dump) \
OP(prof_recent_alloc) \
OP(prof_recent_dump) \
OP(prof_stats)
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(background_thread) \
OP(max_per_bg_thd) \
OP(ctl) \
OP(prof) \
OP(prof_thds_data) \
OP(prof_dump) \
OP(prof_recent_alloc) \
OP(prof_recent_dump) \
OP(prof_stats)
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
mutex_prof_num_global_mutexes
mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;
#define MUTEX_PROF_ARENA_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list) \
OP(hpa_shard) \
OP(hpa_shard_grow) \
OP(hpa_sec)
#define MUTEX_PROF_ARENA_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list) \
OP(hpa_shard) \
OP(hpa_shard_grow) \
OP(hpa_sec)
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
mutex_prof_num_arena_mutexes
mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;
/*
* The forth parameter is a boolean value that is true for derived rate counters
* and false for real ones.
*/
#define MUTEX_PROF_UINT64_COUNTERS \
OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \
OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \
OP(num_wait, uint64_t, "n_waiting", false, num_wait) \
OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \
OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \
OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \
OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \
OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \
OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \
OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \
OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)
#define MUTEX_PROF_UINT64_COUNTERS \
OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \
OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \
OP(num_wait, uint64_t, "n_waiting", false, num_wait) \
OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \
OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \
OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \
OP(num_owner_switch, uint64_t, "n_owner_switch", false, \
num_owner_switch) \
OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \
OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \
OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \
OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)
#define MUTEX_PROF_UINT32_COUNTERS \
OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)
#define MUTEX_PROF_UINT32_COUNTERS \
OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)
#define MUTEX_PROF_COUNTERS \
MUTEX_PROF_UINT64_COUNTERS \
MUTEX_PROF_UINT32_COUNTERS
#define MUTEX_PROF_COUNTERS \
MUTEX_PROF_UINT64_COUNTERS \
MUTEX_PROF_UINT32_COUNTERS
#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,
#define COUNTER_ENUM(counter_list, t) \
typedef enum { \
counter_list \
mutex_prof_num_##t##_counters \
} mutex_prof_##t##_counter_ind_t;
#define COUNTER_ENUM(counter_list, t) \
typedef enum { \
counter_list mutex_prof_num_##t##_counters \
} mutex_prof_##t##_counter_ind_t;
COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
@ -89,17 +89,17 @@ typedef struct {
* contention. We update them once we have the lock.
*/
/* Total time (in nano seconds) spent waiting on this mutex. */
nstime_t tot_wait_time;
nstime_t tot_wait_time;
/* Max time (in nano seconds) spent on a single lock operation. */
nstime_t max_wait_time;
nstime_t max_wait_time;
/* # of times have to wait for this mutex (after spinning). */
uint64_t n_wait_times;
uint64_t n_wait_times;
/* # of times acquired the mutex through local spinning. */
uint64_t n_spin_acquired;
uint64_t n_spin_acquired;
/* Max # of threads waiting for the mutex at the same time. */
uint32_t max_n_thds;
uint32_t max_n_thds;
/* Current # of threads waiting on the lock. Atomic synced. */
atomic_u32_t n_waiting_thds;
atomic_u32_t n_waiting_thds;
/*
* Data touched on the fast path. These are modified right after we
@ -108,11 +108,11 @@ typedef struct {
* cacheline.
*/
/* # of times the mutex holder is different than the previous one. */
uint64_t n_owner_switches;
uint64_t n_owner_switches;
/* Previous mutex holder, to facilitate n_owner_switches. */
tsdn_t *prev_owner;
tsdn_t *prev_owner;
/* # of lock() operations in total. */
uint64_t n_lock_ops;
uint64_t n_lock_ops;
} mutex_prof_data_t;
#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */

View file

@ -9,9 +9,11 @@
#define NSTIME_MAGIC ((uint32_t)0xb8a9ce37)
#ifdef JEMALLOC_DEBUG
# define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC}
# define NSTIME_ZERO_INITIALIZER \
{ 0, NSTIME_MAGIC }
#else
# define NSTIME_ZERO_INITIALIZER {0}
# define NSTIME_ZERO_INITIALIZER \
{ 0 }
#endif
typedef struct {
@ -23,43 +25,40 @@ typedef struct {
static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER;
void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
uint64_t nstime_ms(const nstime_t *time);
uint64_t nstime_sec(const nstime_t *time);
uint64_t nstime_nsec(const nstime_t *time);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_iadd(nstime_t *time, uint64_t addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_iadd(nstime_t *time, uint64_t addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
uint64_t nstime_ns_since(const nstime_t *past);
uint64_t nstime_ms_since(const nstime_t *past);
typedef bool (nstime_monotonic_t)(void);
typedef bool(nstime_monotonic_t)(void);
extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
typedef void (nstime_update_t)(nstime_t *);
typedef void(nstime_update_t)(nstime_t *);
extern nstime_update_t *JET_MUTABLE nstime_update;
typedef void (nstime_prof_update_t)(nstime_t *);
typedef void(nstime_prof_update_t)(nstime_t *);
extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update;
void nstime_init_update(nstime_t *time);
void nstime_prof_init_update(nstime_t *time);
enum prof_time_res_e {
prof_time_res_default = 0,
prof_time_res_high = 1
};
enum prof_time_res_e { prof_time_res_default = 0, prof_time_res_high = 1 };
typedef enum prof_time_res_e prof_time_res_t;
extern prof_time_res_t opt_prof_time_res;
extern prof_time_res_t opt_prof_time_res;
extern const char *const prof_time_res_mode_names[];
JEMALLOC_ALWAYS_INLINE void

View file

@ -101,7 +101,7 @@ struct pa_shard_s {
* these configurations to use many fewer arenas, and therefore have a
* higher risk of hot locks.
*/
sec_t hpa_sec;
sec_t hpa_sec;
hpa_shard_t hpa_shard;
/* The source of edata_t objects. */
@ -109,7 +109,7 @@ struct pa_shard_s {
unsigned ind;
malloc_mutex_t *stats_mtx;
malloc_mutex_t *stats_mtx;
pa_shard_stats_t *stats;
/* The emap this shard is tied to. */
@ -121,8 +121,8 @@ struct pa_shard_s {
static inline bool
pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0;
return ecache_npages_get(&shard->pac.ecache_muzzy) == 0
&& pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0;
}
static inline ehooks_t *
@ -186,10 +186,10 @@ bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
* (We could make generated_dirty the return value of course, but this is more
* consistent with the shrink pathway and our error codes here).
*/
void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
bool *deferred_work_generated);
bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
ssize_t decay_ms, pac_purge_eagerness_t eagerness);
void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
bool *deferred_work_generated);
bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
ssize_t decay_ms, pac_purge_eagerness_t eagerness);
ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);
/*
@ -199,10 +199,10 @@ ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);
* though, the arena, background thread, and PAC modules are tightly interwoven
* in a way that's tricky to extricate, so we only do the HPA-specific parts.
*/
void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
bool deferral_allowed);
void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_set_deferral_allowed(
tsdn_t *tsdn, pa_shard_t *shard, bool deferral_allowed);
void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
/******************************************************************************/
@ -228,8 +228,8 @@ size_t pa_shard_nactive(pa_shard_t *shard);
size_t pa_shard_ndirty(pa_shard_t *shard);
size_t pa_shard_nmuzzy(pa_shard_t *shard);
void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
size_t *ndirty, size_t *nmuzzy);
void pa_shard_basic_stats_merge(
pa_shard_t *shard, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,

View file

@ -95,12 +95,12 @@ struct pac_s {
ecache_t ecache_muzzy;
ecache_t ecache_retained;
base_t *base;
emap_t *emap;
base_t *base;
emap_t *emap;
edata_cache_t *edata_cache;
/* The grow info for the retained ecache. */
exp_grow_t exp_grow;
exp_grow_t exp_grow;
malloc_mutex_t grow_mtx;
/* Special allocator for guarded frequently reused extents. */
@ -119,7 +119,7 @@ struct pac_s {
decay_t decay_muzzy; /* muzzy --> retained */
malloc_mutex_t *stats_mtx;
pac_stats_t *stats;
pac_stats_t *stats;
/* Extent serial number generator state. */
atomic_zu_t extent_sn_next;
@ -141,8 +141,8 @@ struct pac_thp_s {
bool thp_madvise;
/* Below fields are protected by the lock. */
malloc_mutex_t lock;
bool auto_thp_switched;
atomic_u_t n_thp_lazy;
bool auto_thp_switched;
atomic_u_t n_thp_lazy;
/*
* List that tracks HUGEPAGE aligned regions that're lazily hugified
* in auto thp mode.
@ -195,11 +195,11 @@ bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
*
* Returns true on error (if the new limit is not valid).
*/
bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
size_t *new_limit);
bool pac_retain_grow_limit_get_set(
tsdn_t *tsdn, pac_t *pac, size_t *old_limit, size_t *new_limit);
bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
ssize_t decay_ms, pac_purge_eagerness_t eagerness);
bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
ssize_t decay_ms, pac_purge_eagerness_t eagerness);
ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);
void pac_reset(tsdn_t *tsdn, pac_t *pac);

View file

@ -5,27 +5,24 @@
#include "jemalloc/internal/jemalloc_internal_types.h"
/* Actual operating system page size, detected during bootstrap, <= PAGE. */
extern size_t os_page;
extern size_t os_page;
/* Page size. LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
ALIGNMENT_ADDR2BASE(a, PAGE)
#define PAGE_ADDR2BASE(a) ALIGNMENT_ADDR2BASE(a, PAGE)
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
#define PAGE_CEILING(s) (((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the largest pagesize multiple that is <=s. */
#define PAGE_FLOOR(s) \
((s) & ~PAGE_MASK)
#define PAGE_FLOOR(s) ((s) & ~PAGE_MASK)
/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
/*
* Used to validate that the hugepage size is not unexpectedly high. The huge
@ -36,7 +33,7 @@ extern size_t os_page;
#define HUGEPAGE_MAX_EXPECTED_SIZE ((size_t)(16U << 20))
#if LG_HUGEPAGE != 0
# define HUGEPAGE_PAGES (HUGEPAGE / PAGE)
# define HUGEPAGE_PAGES (HUGEPAGE / PAGE)
#else
/*
* It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths. If
@ -45,19 +42,17 @@ extern size_t os_page;
* that this value is at least 1. (We won't ever run in this degraded state;
* hpa_supported() returns false in this case.
*/
# define HUGEPAGE_PAGES 1
# define HUGEPAGE_PAGES 1
#endif
/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a) \
ALIGNMENT_ADDR2BASE(a, HUGEPAGE)
#define HUGEPAGE_ADDR2BASE(a) ALIGNMENT_ADDR2BASE(a, HUGEPAGE)
/* Return the smallest pagesize multiple that is >= s. */
#define HUGEPAGE_CEILING(s) \
(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
#define HUGEPAGE_CEILING(s) (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
# define PAGES_CAN_PURGE_LAZY
# define PAGES_CAN_PURGE_LAZY
#endif
/*
* PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
@ -68,10 +63,11 @@ extern size_t os_page;
* next step after purging on Windows anyway, there's no point in adding such
* complexity.
*/
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
defined(JEMALLOC_MAPS_COALESCE))
# define PAGES_CAN_PURGE_FORCED
#if !defined(_WIN32) \
&& ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) \
&& defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) \
|| defined(JEMALLOC_MAPS_COALESCE))
# define PAGES_CAN_PURGE_FORCED
#endif
static const bool pages_can_purge_lazy =
@ -90,7 +86,7 @@ static const bool pages_can_purge_forced =
;
#if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL)
# define PAGES_CAN_HUGIFY
# define PAGES_CAN_HUGIFY
#endif
static const bool pages_can_hugify =
@ -102,25 +98,25 @@ static const bool pages_can_hugify =
;
typedef enum {
thp_mode_default = 0, /* Do not change hugepage settings. */
thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */
thp_mode_default = 0, /* Do not change hugepage settings. */
thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */
thp_mode_names_limit = 3, /* Used for option processing. */
thp_mode_not_supported = 3 /* No THP support detected. */
thp_mode_names_limit = 3, /* Used for option processing. */
thp_mode_not_supported = 3 /* No THP support detected. */
} thp_mode_t;
#define THP_MODE_DEFAULT thp_mode_default
extern thp_mode_t opt_thp;
extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
extern thp_mode_t opt_thp;
extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
extern const char *const thp_mode_names[];
void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
void pages_unmap(void *addr, size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge_lazy(void *addr, size_t size);
bool pages_purge_forced(void *addr, size_t size);
void pages_unmap(void *addr, size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge_lazy(void *addr, size_t size);
bool pages_purge_forced(void *addr, size_t size);
bool pages_purge_process_madvise(void *vec, size_t ven_len, size_t total_bytes);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
@ -128,7 +124,7 @@ bool pages_collapse(void *addr, size_t size);
bool pages_dontdump(void *addr, size_t size);
bool pages_dodump(void *addr, size_t size);
bool pages_boot(void);
void pages_set_thp_state (void *ptr, size_t size);
void pages_set_thp_state(void *ptr, size_t size);
void pages_mark_guards(void *head, void *tail);
void pages_unmark_guards(void *head, void *tail);

View file

@ -41,9 +41,8 @@ struct pai_s {
*/
static inline edata_t *
pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
bool zero, bool guarded, bool frequent_reuse,
bool *deferred_work_generated) {
pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
return self->alloc(tsdn, self, size, alignment, zero, guarded,
frequent_reuse, deferred_work_generated);
}
@ -66,13 +65,13 @@ pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
static inline bool
pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
size_t new_size, bool *deferred_work_generated) {
return self->shrink(tsdn, self, edata, old_size, new_size,
deferred_work_generated);
return self->shrink(
tsdn, self, edata, old_size, new_size, deferred_work_generated);
}
static inline void
pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated) {
pai_dalloc(
tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) {
self->dalloc(tsdn, self, edata, deferred_work_generated);
}

View file

@ -14,7 +14,8 @@ struct peak_s {
uint64_t adjustment;
};
#define PEAK_INITIALIZER {0, 0}
#define PEAK_INITIALIZER \
{ 0, 0 }
static inline uint64_t
peak_max(peak_t *peak) {

View file

@ -20,7 +20,7 @@
/* Update the peak with current tsd state. */
void peak_event_update(tsd_t *tsd);
/* Set current state to zero. */
void peak_event_zero(tsd_t *tsd);
void peak_event_zero(tsd_t *tsd);
uint64_t peak_event_max(tsd_t *tsd);
extern te_base_cb_t peak_te_handler;

View file

@ -129,8 +129,7 @@ phn_prev_set(void *phn, void *prev, size_t offset) {
}
JEMALLOC_ALWAYS_INLINE void
phn_merge_ordered(void *phn0, void *phn1, size_t offset,
ph_cmp_t cmp) {
phn_merge_ordered(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) {
void *phn0child;
assert(phn0 != NULL);
@ -361,15 +360,14 @@ ph_insert(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
phn_next_set(phn, phn_next_get(ph->root, offset), offset);
if (phn_next_get(ph->root, offset) != NULL) {
phn_prev_set(phn_next_get(ph->root, offset), phn,
offset);
phn_prev_set(phn_next_get(ph->root, offset), phn, offset);
}
phn_prev_set(phn, ph->root, offset);
phn_next_set(ph->root, phn, offset);
ph->auxcount++;
unsigned nmerges = ffs_zu(ph->auxcount);
bool done = false;
bool done = false;
for (unsigned i = 0; i < nmerges && !done; i++) {
done = ph_try_aux_merge_pair(ph, offset, cmp);
}
@ -387,7 +385,6 @@ ph_remove_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
ph->root = ph_merge_children(ph->root, offset, cmp);
return ret;
}
JEMALLOC_ALWAYS_INLINE void
@ -398,11 +395,11 @@ ph_remove(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
return;
}
void* prev = phn_prev_get(phn, offset);
void* next = phn_next_get(phn, offset);
void *prev = phn_prev_get(phn, offset);
void *next = phn_next_get(phn, offset);
/* If we have children, then we integrate them back in the heap. */
void* replace = ph_merge_children(phn, offset, cmp);
void *replace = ph_merge_children(phn, offset, cmp);
if (replace != NULL) {
phn_next_set(replace, next, offset);
if (next != NULL) {
@ -438,16 +435,16 @@ ph_enumerate_vars_init(ph_enumerate_vars_t *vars, uint16_t max_visit_num,
* max_queue_size must be able to support max_visit_num, which means
* the queue will not overflow before reaching max_visit_num.
*/
assert(vars->max_queue_size >= (vars->max_visit_num + 1)/2);
assert(vars->max_queue_size >= (vars->max_visit_num + 1) / 2);
}
JEMALLOC_ALWAYS_INLINE void
ph_enumerate_queue_push(void *phn, void **bfs_queue,
ph_enumerate_vars_t *vars) {
ph_enumerate_queue_push(
void *phn, void **bfs_queue, ph_enumerate_vars_t *vars) {
assert(vars->queue_size < vars->max_queue_size);
bfs_queue[vars->rear] = phn;
vars->rear = (vars->rear + 1) % vars->max_queue_size;
(vars->queue_size) ++;
(vars->queue_size)++;
}
JEMALLOC_ALWAYS_INLINE void *
@ -456,11 +453,10 @@ ph_enumerate_queue_pop(void **bfs_queue, ph_enumerate_vars_t *vars) {
assert(vars->queue_size <= vars->max_queue_size);
void *ret = bfs_queue[vars->front];
vars->front = (vars->front + 1) % vars->max_queue_size;
(vars->queue_size) --;
(vars->queue_size)--;
return ret;
}
/*
* The two functions below offer a solution to enumerate the pairing heap.
* Whe enumerating, always call ph_enumerate_prepare first to prepare the queue
@ -478,13 +474,13 @@ ph_enumerate_prepare(ph_t *ph, void **bfs_queue, ph_enumerate_vars_t *vars,
}
JEMALLOC_ALWAYS_INLINE void *
ph_enumerate_next(ph_t *ph, size_t offset, void **bfs_queue,
ph_enumerate_vars_t *vars) {
ph_enumerate_next(
ph_t *ph, size_t offset, void **bfs_queue, ph_enumerate_vars_t *vars) {
if (vars->queue_size == 0) {
return NULL;
}
(vars->visited_num) ++;
(vars->visited_num)++;
if (vars->visited_num > vars->max_visit_num) {
return NULL;
}
@ -502,109 +498,97 @@ ph_enumerate_next(ph_t *ph, size_t offset, void **bfs_queue,
return ret;
}
#define ph_structs(a_prefix, a_type, a_max_queue_size) \
typedef struct { \
phn_link_t link; \
} a_prefix##_link_t; \
\
typedef struct { \
ph_t ph; \
} a_prefix##_t; \
\
typedef struct { \
void *bfs_queue[a_max_queue_size]; \
ph_enumerate_vars_t vars; \
} a_prefix##_enumerate_helper_t;
#define ph_structs(a_prefix, a_type, a_max_queue_size) \
typedef struct { \
phn_link_t link; \
} a_prefix##_link_t; \
\
typedef struct { \
ph_t ph; \
} a_prefix##_t; \
\
typedef struct { \
void *bfs_queue[a_max_queue_size]; \
ph_enumerate_vars_t vars; \
} a_prefix##_enumerate_helper_t;
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_type) \
\
a_attr void a_prefix##_new(a_prefix##_t *ph); \
a_attr bool a_prefix##_empty(a_prefix##_t *ph); \
a_attr a_type *a_prefix##_first(a_prefix##_t *ph); \
a_attr a_type *a_prefix##_any(a_prefix##_t *ph); \
a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn); \
a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph); \
a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn); \
a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph); \
a_attr void a_prefix##_enumerate_prepare(a_prefix##_t *ph, \
a_prefix##_enumerate_helper_t *helper, uint16_t max_visit_num, \
uint16_t max_queue_size); \
a_attr a_type *a_prefix##_enumerate_next(a_prefix##_t *ph, \
a_prefix##_enumerate_helper_t *helper);
#define ph_proto(a_attr, a_prefix, a_type) \
\
a_attr void a_prefix##_new(a_prefix##_t *ph); \
a_attr bool a_prefix##_empty(a_prefix##_t *ph); \
a_attr a_type *a_prefix##_first(a_prefix##_t *ph); \
a_attr a_type *a_prefix##_any(a_prefix##_t *ph); \
a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn); \
a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph); \
a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn); \
a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph); \
a_attr void a_prefix##_enumerate_prepare(a_prefix##_t *ph, \
a_prefix##_enumerate_helper_t *helper, uint16_t max_visit_num, \
uint16_t max_queue_size); \
a_attr a_type *a_prefix##_enumerate_next( \
a_prefix##_t *ph, a_prefix##_enumerate_helper_t *helper);
/* The ph_gen() macro generates a type-specific pairing heap implementation. */
#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp) \
JEMALLOC_ALWAYS_INLINE int \
a_prefix##_ph_cmp(void *a, void *b) { \
return a_cmp((a_type *)a, (a_type *)b); \
} \
\
a_attr void \
a_prefix##_new(a_prefix##_t *ph) { \
ph_new(&ph->ph); \
} \
\
a_attr bool \
a_prefix##_empty(a_prefix##_t *ph) { \
return ph_empty(&ph->ph); \
} \
\
a_attr a_type * \
a_prefix##_first(a_prefix##_t *ph) { \
return ph_first(&ph->ph, offsetof(a_type, a_field), \
&a_prefix##_ph_cmp); \
} \
\
a_attr a_type * \
a_prefix##_any(a_prefix##_t *ph) { \
return ph_any(&ph->ph, offsetof(a_type, a_field)); \
} \
\
a_attr void \
a_prefix##_insert(a_prefix##_t *ph, a_type *phn) { \
ph_insert(&ph->ph, phn, offsetof(a_type, a_field), \
a_prefix##_ph_cmp); \
} \
\
a_attr a_type * \
a_prefix##_remove_first(a_prefix##_t *ph) { \
return ph_remove_first(&ph->ph, offsetof(a_type, a_field), \
a_prefix##_ph_cmp); \
} \
\
a_attr void \
a_prefix##_remove(a_prefix##_t *ph, a_type *phn) { \
ph_remove(&ph->ph, phn, offsetof(a_type, a_field), \
a_prefix##_ph_cmp); \
} \
\
a_attr a_type * \
a_prefix##_remove_any(a_prefix##_t *ph) { \
a_type *ret = a_prefix##_any(ph); \
if (ret != NULL) { \
a_prefix##_remove(ph, ret); \
} \
return ret; \
} \
\
a_attr void \
a_prefix##_enumerate_prepare(a_prefix##_t *ph, \
a_prefix##_enumerate_helper_t *helper, uint16_t max_visit_num, \
uint16_t max_queue_size) { \
ph_enumerate_prepare(&ph->ph, helper->bfs_queue, &helper->vars, \
max_visit_num, max_queue_size); \
} \
\
a_attr a_type * \
a_prefix##_enumerate_next(a_prefix##_t *ph, \
a_prefix##_enumerate_helper_t *helper) { \
return ph_enumerate_next(&ph->ph, offsetof(a_type, a_field), \
helper->bfs_queue, &helper->vars); \
}
#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp) \
JEMALLOC_ALWAYS_INLINE int a_prefix##_ph_cmp(void *a, void *b) { \
return a_cmp((a_type *)a, (a_type *)b); \
} \
\
a_attr void a_prefix##_new(a_prefix##_t *ph) { \
ph_new(&ph->ph); \
} \
\
a_attr bool a_prefix##_empty(a_prefix##_t *ph) { \
return ph_empty(&ph->ph); \
} \
\
a_attr a_type *a_prefix##_first(a_prefix##_t *ph) { \
return ph_first( \
&ph->ph, offsetof(a_type, a_field), &a_prefix##_ph_cmp); \
} \
\
a_attr a_type *a_prefix##_any(a_prefix##_t *ph) { \
return ph_any(&ph->ph, offsetof(a_type, a_field)); \
} \
\
a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn) { \
ph_insert(&ph->ph, phn, offsetof(a_type, a_field), \
a_prefix##_ph_cmp); \
} \
\
a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph) { \
return ph_remove_first( \
&ph->ph, offsetof(a_type, a_field), a_prefix##_ph_cmp); \
} \
\
a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn) { \
ph_remove(&ph->ph, phn, offsetof(a_type, a_field), \
a_prefix##_ph_cmp); \
} \
\
a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph) { \
a_type *ret = a_prefix##_any(ph); \
if (ret != NULL) { \
a_prefix##_remove(ph, ret); \
} \
return ret; \
} \
\
a_attr void a_prefix##_enumerate_prepare(a_prefix##_t *ph, \
a_prefix##_enumerate_helper_t *helper, uint16_t max_visit_num, \
uint16_t max_queue_size) { \
ph_enumerate_prepare(&ph->ph, helper->bfs_queue, \
&helper->vars, max_visit_num, max_queue_size); \
} \
\
a_attr a_type *a_prefix##_enumerate_next( \
a_prefix##_t *ph, a_prefix##_enumerate_helper_t *helper) { \
return ph_enumerate_next(&ph->ph, offsetof(a_type, a_field), \
helper->bfs_queue, &helper->vars); \
}
#endif /* JEMALLOC_INTERNAL_PH_H */

View file

@ -26,11 +26,11 @@
/******************************************************************************/
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
@ -49,7 +49,7 @@ prng_state_next_zu(size_t state) {
#elif LG_SIZEOF_PTR == 3
return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
# error Unsupported pointer size
#endif
}

View file

@ -17,21 +17,21 @@ extern size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];
void prof_bt_hash(const void *key, size_t r_hash[2]);
bool prof_bt_keycomp(const void *k1, const void *k2);
bool prof_data_init(tsd_t *tsd);
bool prof_data_init(tsd_t *tsd);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name);
void prof_unbias_map_init(void);
int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name);
void prof_unbias_map_init(void);
void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
prof_tdata_t *tdata, bool leakcheck);
prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid,
prof_tdata_t *prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid,
uint64_t thr_discrim, char *thread_name, bool active);
void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx);
void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx);
/* Used in unit tests. */
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
void prof_cnt_all(prof_cnt_t *cnt_all);
void prof_cnt_all(prof_cnt_t *cnt_all);
#endif /* JEMALLOC_INTERNAL_PROF_DATA_H */

View file

@ -7,21 +7,22 @@
#include "jemalloc/internal/prof_hook.h"
#include "jemalloc/internal/thread_event_registry.h"
extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern unsigned opt_prof_bt_max;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern size_t opt_experimental_lg_prof_threshold; /* Mean bytes between thresholds. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_leak_error; /* Exit with error code if memory leaked */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern bool opt_prof_log; /* Turn logging on at boot. */
extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern size_t
opt_experimental_lg_prof_threshold; /* Mean bytes between thresholds. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_leak_error; /* Exit with error code if memory leaked */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern bool opt_prof_log; /* Turn logging on at boot. */
extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
@ -57,19 +58,19 @@ extern size_t lg_prof_sample;
extern bool prof_booted;
void prof_backtrace_hook_set(prof_backtrace_hook_t hook);
void prof_backtrace_hook_set(prof_backtrace_hook_t hook);
prof_backtrace_hook_t prof_backtrace_hook_get(void);
void prof_dump_hook_set(prof_dump_hook_t hook);
void prof_dump_hook_set(prof_dump_hook_t hook);
prof_dump_hook_t prof_dump_hook_get(void);
void prof_sample_hook_set(prof_sample_hook_t hook);
void prof_sample_hook_set(prof_sample_hook_t hook);
prof_sample_hook_t prof_sample_hook_get(void);
void prof_sample_free_hook_set(prof_sample_free_hook_t hook);
void prof_sample_free_hook_set(prof_sample_free_hook_t hook);
prof_sample_free_hook_t prof_sample_free_hook_get(void);
void prof_threshold_hook_set(prof_threshold_hook_t hook);
void prof_threshold_hook_set(prof_threshold_hook_t hook);
prof_threshold_hook_t prof_threshold_hook_get(void);
/* Functions only accessed in prof_inlines.h */
@ -77,33 +78,33 @@ prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx);
void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
size_t usize, prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
prof_info_t *prof_info);
void prof_malloc_sample_object(
tsd_t *tsd, const void *ptr, size_t size, size_t usize, prof_tctx_t *tctx);
void prof_free_sampled_object(
tsd_t *tsd, const void *ptr, size_t usize, prof_info_t *prof_info);
prof_tctx_t *prof_tctx_create(tsd_t *tsd);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd, base_t *base);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd, base_t *base);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
uint64_t prof_sample_new_event_wait(tsd_t *tsd);
uint64_t tsd_prof_sample_event_wait_get(tsd_t *tsd);
@ -130,8 +131,8 @@ uint64_t tsd_prof_sample_event_wait_get(tsd_t *tsd);
*/
JEMALLOC_ALWAYS_INLINE bool
te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize,
size_t *surplus) {
te_prof_sample_event_lookahead_surplus(
tsd_t *tsd, size_t usize, size_t *surplus) {
if (surplus != NULL) {
/*
* This is a dead store: the surplus will be overwritten before
@ -146,8 +147,8 @@ te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize,
return false;
}
/* The subtraction is intentionally susceptible to underflow. */
uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize -
tsd_thread_allocated_last_event_get(tsd);
uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize
- tsd_thread_allocated_last_event_get(tsd);
uint64_t sample_wait = tsd_prof_sample_event_wait_get(tsd);
if (accumbytes < sample_wait) {
return false;

View file

@ -21,7 +21,8 @@ typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned);
typedef void (*prof_dump_hook_t)(const char *filename);
/* ptr, size, backtrace vector, backtrace vector length, usize */
typedef void (*prof_sample_hook_t)(const void *ptr, size_t size, void **backtrace, unsigned backtrace_length, size_t usize);
typedef void (*prof_sample_hook_t)(const void *ptr, size_t size,
void **backtrace, unsigned backtrace_length, size_t usize);
/* ptr, size */
typedef void (*prof_sample_free_hook_t)(const void *, size_t);
@ -29,6 +30,7 @@ typedef void (*prof_sample_free_hook_t)(const void *, size_t);
/*
* A callback hook that notifies when an allocation threshold has been crossed.
*/
typedef void (*prof_threshold_hook_t)(uint64_t alloc, uint64_t dealloc, uint64_t peak);
typedef void (*prof_threshold_hook_t)(
uint64_t alloc, uint64_t dealloc, uint64_t peak);
#endif /* JEMALLOC_INTERNAL_PROF_HOOK_H */

View file

@ -164,8 +164,8 @@ JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) {
prof_tctx_t *ret;
if (!prof_active ||
likely(prof_sample_should_skip(tsd, sample_event))) {
if (!prof_active
|| likely(prof_sample_should_skip(tsd, sample_event))) {
ret = PROF_TCTX_SENTINEL;
} else {
ret = prof_tctx_create(tsd);
@ -242,8 +242,8 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
* counters.
*/
if (unlikely(old_sampled)) {
prof_free_sampled_object(tsd, old_ptr, old_usize,
old_prof_info);
prof_free_sampled_object(
tsd, old_ptr, old_usize, old_prof_info);
}
}
@ -254,9 +254,10 @@ prof_sample_align(size_t usize, size_t orig_align) {
* w/o metadata lookup.
*/
assert(opt_prof);
return (orig_align < PROF_SAMPLE_ALIGNMENT &&
(sz_can_use_slab(usize) || opt_cache_oblivious)) ?
PROF_SAMPLE_ALIGNMENT : orig_align;
return (orig_align < PROF_SAMPLE_ALIGNMENT
&& (sz_can_use_slab(usize) || opt_cache_oblivious))
? PROF_SAMPLE_ALIGNMENT
: orig_align;
}
JEMALLOC_ALWAYS_INLINE bool
@ -271,8 +272,8 @@ prof_sampled(tsd_t *tsd, const void *ptr) {
}
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize,
emap_alloc_ctx_t *alloc_ctx) {
prof_free(
tsd_t *tsd, const void *ptr, size_t usize, emap_alloc_ctx_t *alloc_ctx) {
prof_info_t prof_info;
prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);

View file

@ -13,9 +13,9 @@ bool prof_log_init(tsd_t *tsdn);
size_t prof_log_bt_count(void);
size_t prof_log_alloc_count(void);
size_t prof_log_thr_count(void);
bool prof_log_is_logging(void);
bool prof_log_rep_check(void);
void prof_log_dummy_set(bool new_value);
bool prof_log_is_logging(void);
bool prof_log_rep_check(void);
void prof_log_dummy_set(bool new_value);
bool prof_log_start(tsdn_t *tsdn, const char *filename);
bool prof_log_stop(tsdn_t *tsdn);

View file

@ -10,29 +10,29 @@
struct prof_bt_s {
/* Backtrace, stored as len program counters. */
void **vec;
unsigned len;
void **vec;
unsigned len;
};
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
void **vec;
unsigned *len;
unsigned max;
void **vec;
unsigned *len;
unsigned max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
/* Profiling counters. */
uint64_t curobjs;
uint64_t curobjs_shifted_unbiased;
uint64_t curbytes;
uint64_t curbytes_unbiased;
uint64_t accumobjs;
uint64_t accumobjs_shifted_unbiased;
uint64_t accumbytes;
uint64_t accumbytes_unbiased;
uint64_t curobjs;
uint64_t curobjs_shifted_unbiased;
uint64_t curbytes;
uint64_t curbytes_unbiased;
uint64_t accumobjs;
uint64_t accumobjs_shifted_unbiased;
uint64_t accumbytes;
uint64_t accumbytes_unbiased;
};
typedef enum {
@ -44,26 +44,26 @@ typedef enum {
struct prof_tctx_s {
/* Thread data for thread that performed the allocation. */
prof_tdata_t *tdata;
prof_tdata_t *tdata;
/*
* Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
* defunct during teardown.
*/
uint64_t thr_uid;
uint64_t thr_discrim;
uint64_t thr_uid;
uint64_t thr_discrim;
/*
* Reference count of how many times this tctx object is referenced in
* recent allocation / deallocation records, protected by tdata->lock.
*/
uint64_t recent_count;
uint64_t recent_count;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
prof_cnt_t cnts;
/* Associated global context. */
prof_gctx_t *gctx;
prof_gctx_t *gctx;
/*
* UID that distinguishes multiple tctx's created by the same thread,
@ -78,40 +78,40 @@ struct prof_tctx_s {
* threshold can be hit again before the first consumer finishes
* executing prof_tctx_destroy().
*/
uint64_t tctx_uid;
uint64_t tctx_uid;
/* Linkage into gctx's tctxs. */
rb_node(prof_tctx_t) tctx_link;
rb_node(prof_tctx_t) tctx_link;
/*
* True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
* sample vs destroy race.
*/
bool prepared;
bool prepared;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t state;
prof_tctx_state_t state;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t dump_cnts;
prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
struct prof_info_s {
/* Time when the allocation was made. */
nstime_t alloc_time;
nstime_t alloc_time;
/* Points to the prof_tctx_t corresponding to the allocation. */
prof_tctx_t *alloc_tctx;
prof_tctx_t *alloc_tctx;
/* Allocation request size. */
size_t alloc_size;
size_t alloc_size;
};
struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock;
malloc_mutex_t *lock;
/*
* Number of threads that currently cause this gctx to be in a state of
@ -123,48 +123,48 @@ struct prof_gctx_s {
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* gctx.
*/
unsigned nlimbo;
unsigned nlimbo;
/*
* Tree of profile counters, one for each thread that has allocated in
* this context.
*/
prof_tctx_tree_t tctxs;
prof_tctx_tree_t tctxs;
/* Linkage for tree of contexts to be dumped. */
rb_node(prof_gctx_t) dump_link;
rb_node(prof_gctx_t) dump_link;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
prof_cnt_t cnt_summed;
/* Associated backtrace. */
prof_bt_t bt;
prof_bt_t bt;
/* Backtrace vector, variable size, referred to by bt. */
void *vec[1];
void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s {
malloc_mutex_t *lock;
malloc_mutex_t *lock;
/* Monotonically increasing unique thread identifier. */
uint64_t thr_uid;
uint64_t thr_uid;
/*
* Monotonically increasing discriminator among tdata structures
* associated with the same thr_uid.
*/
uint64_t thr_discrim;
uint64_t thr_discrim;
rb_node(prof_tdata_t) tdata_link;
rb_node(prof_tdata_t) tdata_link;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t tctx_uid_next;
uint64_t tctx_uid_next;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
@ -172,15 +172,15 @@ struct prof_tdata_s {
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t bt2tctx;
ckh_t bt2tctx;
/* Included in heap profile dumps if has content. */
char thread_name[PROF_THREAD_NAME_MAX_LEN];
char thread_name[PROF_THREAD_NAME_MAX_LEN];
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
bool enq;
bool enq_idump;
bool enq_gdump;
/*
* Set to true during an early dump phase for tdata's which are
@ -188,22 +188,22 @@ struct prof_tdata_s {
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool dumping;
bool dumping;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool active;
bool active;
bool attached;
bool expired;
bool attached;
bool expired;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
prof_cnt_t cnt_summed;
/* Backtrace vector, used for calls to prof_backtrace(). */
void **vec;
void **vec;
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
@ -212,9 +212,9 @@ struct prof_recent_s {
nstime_t dalloc_time;
ql_elm(prof_recent_t) link;
size_t size;
size_t usize;
atomic_p_t alloc_edata; /* NULL means allocation has been freed. */
size_t size;
size_t usize;
atomic_p_t alloc_edata; /* NULL means allocation has been freed. */
prof_tctx_t *alloc_tctx;
prof_tctx_t *dalloc_tctx;
};

View file

@ -6,30 +6,30 @@
#include "jemalloc/internal/mutex.h"
extern malloc_mutex_t prof_dump_filename_mtx;
extern base_t *prof_base;
extern base_t *prof_base;
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(tsd_t *tsd, prof_bt_t *bt);
void prof_hooks_init(void);
void prof_unwind_init(void);
void prof_sys_thread_name_fetch(tsd_t *tsd);
int prof_getpid(void);
int prof_getpid(void);
void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
bool prof_prefix_set(tsdn_t *tsdn, const char *prefix);
void prof_fdump_impl(tsd_t *tsd);
void prof_idump_impl(tsd_t *tsd);
bool prof_mdump_impl(tsd_t *tsd, const char *filename);
void prof_gdump_impl(tsd_t *tsd);
int prof_thread_stack_range(uintptr_t fp, uintptr_t *low, uintptr_t *high);
int prof_thread_stack_range(uintptr_t fp, uintptr_t *low, uintptr_t *high);
/* Used in unit tests. */
typedef int (prof_sys_thread_name_read_t)(char *buf, size_t limit);
typedef int(prof_sys_thread_name_read_t)(char *buf, size_t limit);
extern prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read;
typedef int (prof_dump_open_file_t)(const char *, int);
typedef int(prof_dump_open_file_t)(const char *, int);
extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file;
typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t);
typedef ssize_t(prof_dump_write_file_t)(int, const void *, size_t);
extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file;
typedef int (prof_dump_open_maps_t)(void);
typedef int(prof_dump_open_maps_t)(void);
extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps;
#endif /* JEMALLOC_INTERNAL_PROF_SYS_H */

View file

@ -1,22 +1,22 @@
#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
#define JEMALLOC_INTERNAL_PROF_TYPES_H
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_info_s prof_info_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_info_s prof_info_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
typedef struct prof_recent_s prof_recent_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
@ -24,54 +24,54 @@ typedef struct prof_recent_s prof_recent_t;
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#ifdef JEMALLOC_PROF_GCC
# define PROF_BT_MAX_LIMIT 256
# define PROF_BT_MAX_LIMIT 256
#else
# define PROF_BT_MAX_LIMIT UINT_MAX
# define PROF_BT_MAX_LIMIT UINT_MAX
#endif
#define PROF_BT_MAX_DEFAULT 128
#define PROF_BT_MAX_DEFAULT 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#ifndef JEMALLOC_PROF
/* Minimize memory bloat for non-prof builds. */
# define PROF_DUMP_BUFSIZE 1
# define PROF_DUMP_BUFSIZE 1
#elif defined(JEMALLOC_DEBUG)
/* Use a small buffer size in debug build, mainly to facilitate testing. */
# define PROF_DUMP_BUFSIZE 16
# define PROF_DUMP_BUFSIZE 16
#else
# define PROF_DUMP_BUFSIZE 65536
# define PROF_DUMP_BUFSIZE 65536
#endif
/* Size of size class related tables */
#ifdef JEMALLOC_PROF
# define PROF_SC_NSIZES SC_NSIZES
# define PROF_SC_NSIZES SC_NSIZES
#else
/* Minimize memory bloat for non-prof builds. */
# define PROF_SC_NSIZES 1
# define PROF_SC_NSIZES 1
#endif
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
#define PROF_NTDATA_LOCKS 256
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
#define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1)
# define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1)
#else
#define PROF_DUMP_FILENAME_LEN 1
# define PROF_DUMP_FILENAME_LEN 1
#endif
/* Default number of recent allocations to record. */

View file

@ -90,7 +90,7 @@ struct psset_s {
*/
hpdata_age_heap_t pageslabs[PSSET_NPSIZES];
/* Bitmap for which set bits correspond to non-empty heaps. */
fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)];
fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)];
psset_stats_t stats;
/*
* Slabs with no active allocations, but which are allowed to serve new

View file

@ -28,33 +28,36 @@
*/
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
/* Static initializer for an empty list. */
#define ql_head_initializer(a_head) {NULL}
#define ql_head_initializer(a_head) \
{ NULL }
/* The field definition. */
#define ql_elm(a_type) qr(a_type)
#define ql_elm(a_type) qr(a_type)
/* A pointer to the first element in the list, or NULL if the list is empty. */
#define ql_first(a_head) ((a_head)->qlh_first)
/* Dynamically initializes a list. */
#define ql_new(a_head) do { \
ql_first(a_head) = NULL; \
} while (0)
#define ql_new(a_head) \
do { \
ql_first(a_head) = NULL; \
} while (0)
/*
* Sets dest to be the contents of src (overwriting any elements there), leaving
* src empty.
*/
#define ql_move(a_head_dest, a_head_src) do { \
ql_first(a_head_dest) = ql_first(a_head_src); \
ql_new(a_head_src); \
} while (0)
#define ql_move(a_head_dest, a_head_src) \
do { \
ql_first(a_head_dest) = ql_first(a_head_src); \
ql_new(a_head_src); \
} while (0)
/* True if the list is empty, otherwise false. */
#define ql_empty(a_head) (ql_first(a_head) == NULL)
@ -68,85 +71,91 @@ struct { \
/*
* Obtains the last item in the list.
*/
#define ql_last(a_head, a_field) \
#define ql_last(a_head, a_field) \
(ql_empty(a_head) ? NULL : qr_prev(ql_first(a_head), a_field))
/*
* Gets a pointer to the next/prev element in the list. Trying to advance past
* the end or retreat before the beginning of the list returns NULL.
*/
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) ? qr_next((a_elm), a_field) \
: NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) : NULL)
/* Inserts a_elm before a_qlelm in the list. */
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) \
do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
/* Inserts a_elm after a_qlelm in the list. */
#define ql_after_insert(a_qlelm, a_elm, a_field) \
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
/* Inserts a_elm as the first item in the list. */
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (!ql_empty(a_head)) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
#define ql_head_insert(a_head, a_elm, a_field) \
do { \
if (!ql_empty(a_head)) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
/* Inserts a_elm as the last item in the list. */
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (!ql_empty(a_head)) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) \
do { \
if (!ql_empty(a_head)) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
/*
* Given lists a = [a_1, ..., a_n] and [b_1, ..., b_n], results in:
* a = [a1, ..., a_n, b_1, ..., b_n] and b = [].
*/
#define ql_concat(a_head_a, a_head_b, a_field) do { \
if (ql_empty(a_head_a)) { \
ql_move(a_head_a, a_head_b); \
} else if (!ql_empty(a_head_b)) { \
qr_meld(ql_first(a_head_a), ql_first(a_head_b), \
a_field); \
ql_new(a_head_b); \
} \
} while (0)
#define ql_concat(a_head_a, a_head_b, a_field) \
do { \
if (ql_empty(a_head_a)) { \
ql_move(a_head_a, a_head_b); \
} else if (!ql_empty(a_head_b)) { \
qr_meld( \
ql_first(a_head_a), ql_first(a_head_b), a_field); \
ql_new(a_head_b); \
} \
} while (0)
/* Removes a_elm from the list. */
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_new(a_head); \
} \
} while (0)
#define ql_remove(a_head, a_elm, a_field) \
do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_new(a_head); \
} \
} while (0)
/* Removes the first item in the list. */
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) \
do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
/* Removes the last item in the list. */
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) \
do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
/*
* Given a = [a_1, a_2, ..., a_n-1, a_n, a_n+1, ...],
@ -155,14 +164,15 @@ struct { \
* and replaces b's contents with:
* b = [a_n, a_n+1, ...]
*/
#define ql_split(a_head_a, a_elm, a_head_b, a_field) do { \
if (ql_first(a_head_a) == (a_elm)) { \
ql_move(a_head_b, a_head_a); \
} else { \
qr_split(ql_first(a_head_a), (a_elm), a_field); \
ql_first(a_head_b) = (a_elm); \
} \
} while (0)
#define ql_split(a_head_a, a_elm, a_head_b, a_field) \
do { \
if (ql_first(a_head_a) == (a_elm)) { \
ql_move(a_head_b, a_head_a); \
} else { \
qr_split(ql_first(a_head_a), (a_elm), a_field); \
ql_first(a_head_b) = (a_elm); \
} \
} while (0)
/*
* An optimized version of:
@ -170,9 +180,10 @@ struct { \
* ql_remove((a_head), t, a_field);
* ql_tail_insert((a_head), t, a_field);
*/
#define ql_rotate(a_head, a_field) do { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} while (0)
#define ql_rotate(a_head, a_field) \
do { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} while (0)
/*
* Helper macro to iterate over each element in a list in order, starting from
@ -189,10 +200,10 @@ struct { \
* }
*/
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach ((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
#endif /* JEMALLOC_INTERNAL_QL_H */

View file

@ -17,21 +17,22 @@
*/
/* Ring definitions. */
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/*
* Initialize a qr link. Every link must be initialized before being used, even
* if that initialization is going to be immediately overwritten (say, by being
* passed into an insertion macro).
*/
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_new(a_qr, a_field) \
do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
/*
* Go forwards or backwards in the ring. Note that (the ring being circular), this
@ -58,26 +59,27 @@ struct { \
*
* a_qr_a can directly be a qr_next() macro, but a_qr_b cannot.
*/
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = \
(a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = \
(a_qr_b)->a_field.qre_prev->a_field.qre_next; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_field) \
do { \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = \
(a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = \
(a_qr_b)->a_field.qre_prev->a_field.qre_next; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
} while (0)
/*
* Logically, this is just a meld. The intent, though, is that a_qrelm is a
* single-element ring, so that "before" has a more obvious interpretation than
* meld.
*/
#define qr_before_insert(a_qrelm, a_qr, a_field) \
#define qr_before_insert(a_qrelm, a_qr, a_field) \
qr_meld((a_qrelm), (a_qr), a_field)
/* Ditto, but inserting after rather than before. */
#define qr_after_insert(a_qrelm, a_qr, a_field) \
#define qr_after_insert(a_qrelm, a_qr, a_field) \
qr_before_insert(qr_next(a_qrelm, a_field), (a_qr), a_field)
/*
@ -98,14 +100,13 @@ struct { \
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
#define qr_split(a_qr_a, a_qr_b, a_field) qr_meld((a_qr_a), (a_qr_b), a_field)
/*
* Splits off a_qr from the rest of its ring, so that it becomes a
* single-element ring.
*/
#define qr_remove(a_qr, a_field) \
#define qr_remove(a_qr, a_field) \
qr_split(qr_next(a_qr, a_field), (a_qr), a_field)
/*
@ -121,20 +122,19 @@ struct { \
* return sum;
* }
*/
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); (var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next \
: NULL))
/*
* The same (and with the same usage) as qr_foreach, but in the opposite order,
* ending with a_qr.
*/
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) ? (var)->a_field.qre_prev : NULL))
#endif /* JEMALLOC_INTERNAL_QR_H */

View file

@ -6,82 +6,84 @@
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# if defined(__aarch64__) || defined(_M_ARM64)
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __loongarch__
# define LG_QUANTUM 4
# endif
# ifdef __m68k__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
# if defined(__mips_n32) || defined(__mips_n64)
# define LG_QUANTUM 4
# else
# define LG_QUANTUM 3
# endif
# endif
# ifdef __nios2__
# define LG_QUANTUM 3
# endif
# ifdef __or1k__
# define LG_QUANTUM 3
# endif
# if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__ppc64__)
# define LG_QUANTUM 4
# endif
# if defined(__riscv) || defined(__riscv__)
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
defined(__SH4_SINGLE_ONLY__))
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifdef __arc__
# define LG_QUANTUM 3
# endif
# ifndef LG_QUANTUM
# error "Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
# endif
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9) \
|| defined(__sparc_v9__))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# if defined(__aarch64__) || defined(_M_ARM64)
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __loongarch__
# define LG_QUANTUM 4
# endif
# ifdef __m68k__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
# if defined(__mips_n32) || defined(__mips_n64)
# define LG_QUANTUM 4
# else
# define LG_QUANTUM 3
# endif
# endif
# ifdef __nios2__
# define LG_QUANTUM 3
# endif
# ifdef __or1k__
# define LG_QUANTUM 3
# endif
# if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) \
|| defined(__ppc64__)
# define LG_QUANTUM 4
# endif
# if defined(__riscv) || defined(__riscv__)
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# if (defined(__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) \
|| defined(__SH4_SINGLE_ONLY__))
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifdef __arc__
# define LG_QUANTUM 3
# endif
# ifndef LG_QUANTUM
# error \
"Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
#define QUANTUM_CEILING(a) (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
#endif /* JEMALLOC_INTERNAL_QUANTUM_H */

View file

@ -26,7 +26,7 @@
*/
#ifndef __PGI
#define RB_COMPACT
# define RB_COMPACT
#endif
/*

View file

@ -18,48 +18,49 @@
*/
/* Number of high insignificant bits. */
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR + 3)) - LG_VADDR)
/* Number of low insigificant bits. */
#define RTREE_NLIB LG_PAGE
/* Number of significant bits. */
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
/* Number of levels in radix tree. */
#if RTREE_NSB <= 10
# define RTREE_HEIGHT 1
# define RTREE_HEIGHT 1
#elif RTREE_NSB <= 36
# define RTREE_HEIGHT 2
# define RTREE_HEIGHT 2
#elif RTREE_NSB <= 52
# define RTREE_HEIGHT 3
# define RTREE_HEIGHT 3
#else
# error Unsupported number of significant virtual address bits
# error Unsupported number of significant virtual address bits
#endif
/* Use compact leaf representation if virtual address encoding allows. */
#if RTREE_NHIB >= LG_CEIL(SC_NSIZES)
# define RTREE_LEAF_COMPACT
# define RTREE_LEAF_COMPACT
#endif
typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
};
typedef struct rtree_metadata_s rtree_metadata_t;
struct rtree_metadata_s {
szind_t szind;
extent_state_t state; /* Mirrors edata->state. */
bool is_head; /* Mirrors edata->is_head. */
bool slab;
szind_t szind;
extent_state_t state; /* Mirrors edata->state. */
bool is_head; /* Mirrors edata->is_head. */
bool slab;
};
typedef struct rtree_contents_s rtree_contents_t;
struct rtree_contents_s {
edata_t *edata;
edata_t *edata;
rtree_metadata_t metadata;
};
#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH
#define RTREE_LEAF_STATE_SHIFT 2
#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)
#define RTREE_LEAF_STATE_MASK \
MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)
struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
@ -77,36 +78,36 @@ struct rtree_leaf_elm_s {
*
* 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb
*/
atomic_p_t le_bits;
atomic_p_t le_bits;
#else
atomic_p_t le_edata; /* (edata_t *) */
atomic_p_t le_edata; /* (edata_t *) */
/*
* From high to low bits: szind (8 bits), state (4 bits), is_head, slab
*/
atomic_u_t le_metadata;
atomic_u_t le_metadata;
#endif
};
typedef struct rtree_level_s rtree_level_t;
struct rtree_level_s {
/* Number of key bits distinguished by this level. */
unsigned bits;
unsigned bits;
/*
* Cumulative number of key bits distinguished by traversing to
* corresponding tree level.
*/
unsigned cumbits;
unsigned cumbits;
};
typedef struct rtree_s rtree_t;
struct rtree_s {
base_t *base;
malloc_mutex_t init_lock;
base_t *base;
malloc_mutex_t init_lock;
/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
rtree_node_elm_t root[1U << (RTREE_NSB / RTREE_HEIGHT)];
#else
rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
rtree_leaf_elm_t root[1U << (RTREE_NSB / RTREE_HEIGHT)];
#endif
};
@ -118,17 +119,17 @@ struct rtree_s {
*/
static const rtree_level_t rtree_levels[] = {
#if RTREE_HEIGHT == 1
{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 2
{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
{RTREE_NSB / 2, RTREE_NHIB + RTREE_NSB / 2},
{RTREE_NSB / 2 + RTREE_NSB % 2, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 3
{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
{RTREE_NSB/3 + RTREE_NSB%3/2,
RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
{RTREE_NSB / 3, RTREE_NHIB + RTREE_NSB / 3},
{RTREE_NSB / 3 + RTREE_NSB % 3 / 2,
RTREE_NHIB + RTREE_NSB / 3 * 2 + RTREE_NSB % 3 / 2},
{RTREE_NSB / 3 + RTREE_NSB % 3 - RTREE_NSB % 3 / 2, RTREE_NHIB + RTREE_NSB}
#else
# error Unsupported rtree height
# error Unsupported rtree height
#endif
};
@ -139,9 +140,9 @@ rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
JEMALLOC_ALWAYS_INLINE unsigned
rtree_leaf_maskbits(void) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
rtree_levels[RTREE_HEIGHT-1].bits);
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR + 3);
unsigned cumbits = (rtree_levels[RTREE_HEIGHT - 1].cumbits
- rtree_levels[RTREE_HEIGHT - 1].bits);
return ptrbits - cumbits;
}
@ -153,16 +154,16 @@ rtree_leafkey(uintptr_t key) {
JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
return (size_t)((key >> rtree_leaf_maskbits()) &
(RTREE_CTX_NCACHE - 1));
return (
size_t)((key >> rtree_leaf_maskbits()) & (RTREE_CTX_NCACHE - 1));
}
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(uintptr_t key, unsigned level) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = rtree_levels[level].cumbits;
unsigned shiftbits = ptrbits - cumbits;
unsigned maskbits = rtree_levels[level].bits;
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR + 3);
unsigned cumbits = rtree_levels[level].cumbits;
unsigned shiftbits = ptrbits - cumbits;
unsigned maskbits = rtree_levels[level].bits;
uintptr_t mask = (ZU(1) << maskbits) - 1;
return ((key >> shiftbits) & mask);
}
@ -178,12 +179,12 @@ rtree_subkey(uintptr_t key, unsigned level) {
* dependent on a previous rtree write, which means a stale read
* could result if synchronization were omitted here.
*/
# ifdef RTREE_LEAF_COMPACT
#ifdef RTREE_LEAF_COMPACT
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) {
return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
rtree_leaf_elm_bits_read(
tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) {
return (uintptr_t)atomic_load_p(
&elm->le_bits, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE uintptr_t
@ -195,10 +196,10 @@ rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
uintptr_t state_bits = (uintptr_t)contents.metadata.state <<
RTREE_LEAF_STATE_SHIFT;
uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits |
slab_bits;
uintptr_t state_bits = (uintptr_t)contents.metadata.state
<< RTREE_LEAF_STATE_SHIFT;
uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits
| slab_bits;
assert((edata_bits & metadata_bits) == 0);
return edata_bits | metadata_bits;
@ -212,13 +213,13 @@ rtree_leaf_elm_bits_decode(uintptr_t bits) {
contents.metadata.slab = (bool)(bits & 1);
contents.metadata.is_head = (bool)(bits & (1 << 1));
uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >>
RTREE_LEAF_STATE_SHIFT;
uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK)
>> RTREE_LEAF_STATE_SHIFT;
assert(state_bits <= extent_state_max);
contents.metadata.state = (extent_state_t)state_bits;
uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1);
# ifdef __aarch64__
# ifdef __aarch64__
/*
* aarch64 doesn't sign extend the highest virtual address bit to set
* the higher ones. Instead, the high bits get zeroed.
@ -228,49 +229,50 @@ rtree_leaf_elm_bits_decode(uintptr_t bits) {
uintptr_t mask = high_bit_mask & low_bit_mask;
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
contents.edata = (edata_t *)(bits & mask);
# else
# else
/* Restore sign-extended high bits, mask metadata bits. */
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
>> RTREE_NHIB) & low_bit_mask);
# endif
>> RTREE_NHIB)
& low_bit_mask);
# endif
assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
return contents;
}
# endif /* RTREE_LEAF_COMPACT */
#endif /* RTREE_LEAF_COMPACT */
JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool dependent) {
rtree_leaf_elm_read(
tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits);
return contents;
#else
rtree_contents_t contents;
unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
unsigned metadata_bits = atomic_load_u(
&elm->le_metadata, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
contents.metadata.slab = (bool)(metadata_bits & 1);
contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));
uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >>
RTREE_LEAF_STATE_SHIFT;
uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK)
>> RTREE_LEAF_STATE_SHIFT;
assert(state_bits <= extent_state_max);
contents.metadata.state = (extent_state_t)state_bits;
contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT +
RTREE_LEAF_STATE_WIDTH);
contents.metadata.szind = metadata_bits
>> (RTREE_LEAF_STATE_SHIFT + RTREE_LEAF_STATE_WIDTH);
contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
contents.edata = (edata_t *)atomic_load_p(
&elm->le_edata, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
return contents;
#endif
}
JEMALLOC_ALWAYS_INLINE void
rtree_contents_encode(rtree_contents_t contents, void **bits,
unsigned *additional) {
rtree_contents_encode(
rtree_contents_t contents, void **bits, unsigned *additional) {
#ifdef RTREE_LEAF_COMPACT
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
*bits = (void *)rtree_leaf_elm_bits_encode(contents);
@ -282,15 +284,15 @@ rtree_contents_encode(rtree_contents_t contents, void **bits,
*additional = (unsigned)contents.metadata.slab
| ((unsigned)contents.metadata.is_head << 1)
| ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT)
| ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT +
RTREE_LEAF_STATE_WIDTH));
| ((unsigned)contents.metadata.szind
<< (RTREE_LEAF_STATE_SHIFT + RTREE_LEAF_STATE_WIDTH));
*bits = contents.edata;
#endif
}
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, void *bits, unsigned additional) {
rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
void *bits, unsigned additional) {
#ifdef RTREE_LEAF_COMPACT
atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE);
#else
@ -304,10 +306,10 @@ rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
}
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, rtree_contents_t contents) {
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
rtree_contents_t contents) {
assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
void *bits;
void *bits;
unsigned additional;
rtree_contents_encode(contents, &bits, &additional);
rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
@ -348,7 +350,7 @@ rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, rtree_leaf_elm_t **elm) {
size_t slot = rtree_cache_direct_map(key);
size_t slot = rtree_cache_direct_map(key);
uintptr_t leafkey = rtree_leafkey(key);
assert(leafkey != RTREE_LEAFKEY_INVALID);
@ -358,7 +360,7 @@ rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
assert(leaf != NULL);
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT - 1);
*elm = &leaf[subkey];
return false;
@ -370,7 +372,7 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
assert(key != 0);
assert(!dependent || !init_missing);
size_t slot = rtree_cache_direct_map(key);
size_t slot = rtree_cache_direct_map(key);
uintptr_t leafkey = rtree_leafkey(key);
assert(leafkey != RTREE_LEAFKEY_INVALID);
@ -378,39 +380,41 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
assert(leaf != NULL);
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT - 1);
return &leaf[subkey];
}
/*
* Search the L2 LRU cache. On hit, swap the matching element into the
* slot in L1 cache, and move the position in L2 up by 1.
*/
#define RTREE_CACHE_CHECK_L2(i) do { \
if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
assert(leaf != NULL); \
if (i > 0) { \
/* Bubble up by one. */ \
rtree_ctx->l2_cache[i].leafkey = \
rtree_ctx->l2_cache[i - 1].leafkey; \
rtree_ctx->l2_cache[i].leaf = \
rtree_ctx->l2_cache[i - 1].leaf; \
rtree_ctx->l2_cache[i - 1].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[i - 1].leaf = \
rtree_ctx->cache[slot].leaf; \
} else { \
rtree_ctx->l2_cache[0].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[0].leaf = \
rtree_ctx->cache[slot].leaf; \
} \
rtree_ctx->cache[slot].leafkey = leafkey; \
rtree_ctx->cache[slot].leaf = leaf; \
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
return &leaf[subkey]; \
} \
} while (0)
#define RTREE_CACHE_CHECK_L2(i) \
do { \
if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
assert(leaf != NULL); \
if (i > 0) { \
/* Bubble up by one. */ \
rtree_ctx->l2_cache[i].leafkey = \
rtree_ctx->l2_cache[i - 1].leafkey; \
rtree_ctx->l2_cache[i].leaf = \
rtree_ctx->l2_cache[i - 1].leaf; \
rtree_ctx->l2_cache[i - 1].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[i - 1].leaf = \
rtree_ctx->cache[slot].leaf; \
} else { \
rtree_ctx->l2_cache[0].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[0].leaf = \
rtree_ctx->cache[slot].leaf; \
} \
rtree_ctx->cache[slot].leafkey = leafkey; \
rtree_ctx->cache[slot].leaf = leaf; \
uintptr_t subkey = rtree_subkey( \
key, RTREE_HEIGHT - 1); \
return &leaf[subkey]; \
} \
} while (0)
/* Check the first cache entry. */
RTREE_CACHE_CHECK_L2(0);
/* Search the remaining cache elements. */
@ -419,8 +423,8 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
}
#undef RTREE_CACHE_CHECK_L2
return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
dependent, init_missing);
return rtree_leaf_elm_lookup_hard(
tsdn, rtree, rtree_ctx, key, dependent, init_missing);
}
/*
@ -440,8 +444,8 @@ rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
}
static inline rtree_contents_t
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_read(
tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, /* dependent */ true, /* init_missing */ false);
assert(elm != NULL);
@ -449,21 +453,22 @@ rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
}
static inline rtree_metadata_t
rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_metadata_read(
tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, /* dependent */ true, /* init_missing */ false);
assert(elm != NULL);
return rtree_leaf_elm_read(tsdn, rtree, elm,
/* dependent */ true).metadata;
/* dependent */ true)
.metadata;
}
/*
* Returns true when the request cannot be fulfilled by fastpath.
*/
static inline bool
rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
rtree_leaf_elm_t *elm;
/*
* Should check the bool return value (lookup success or not) instead of
@ -476,7 +481,8 @@ rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ct
}
assert(elm != NULL);
*r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm,
/* dependent */ true).metadata;
/* dependent */ true)
.metadata;
return false;
}
@ -490,22 +496,27 @@ rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
* are dependent w/o init_missing, assuming the range spans across at
* most 2 rtree leaf nodes (each covers 1 GiB of vaddr).
*/
void *bits;
void *bits;
unsigned additional;
rtree_contents_encode(contents, &bits, &additional);
rtree_leaf_elm_t *elm = NULL; /* Dead store. */
for (uintptr_t addr = base; addr <= end; addr += PAGE) {
if (addr == base ||
(addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
if (addr == base
|| (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
addr,
/* dependent */ true, /* init_missing */ false);
assert(elm != NULL);
}
assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
/* dependent */ true, /* init_missing */ false));
assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm,
/* dependent */ true).edata != NULL);
assert(elm
== rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
/* dependent */ true, /* init_missing */ false));
assert(!clearing
|| rtree_leaf_elm_read(tsdn, rtree, elm,
/* dependent */ true)
.edata
!= NULL);
rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
elm++;
}
@ -533,13 +544,15 @@ rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
}
static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_clear(
tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, /* dependent */ true, /* init_missing */ false);
assert(elm != NULL);
assert(rtree_leaf_elm_read(tsdn, rtree, elm,
/* dependent */ true).edata != NULL);
/* dependent */ true)
.edata
!= NULL);
rtree_contents_t contents;
contents.edata = NULL;
contents.metadata.szind = SC_NSIZES;

View file

@ -25,7 +25,8 @@
/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
#define RTREE_CTX_CACHE_ELM_INVALID {RTREE_LEAFKEY_INVALID, NULL}
#define RTREE_CTX_CACHE_ELM_INVALID \
{ RTREE_LEAFKEY_INVALID, NULL }
#define RTREE_CTX_INIT_ELM_1 RTREE_CTX_CACHE_ELM_INVALID
#define RTREE_CTX_INIT_ELM_2 RTREE_CTX_INIT_ELM_1, RTREE_CTX_INIT_ELM_1
@ -40,23 +41,27 @@
* Static initializer (to invalidate the cache entries) is required because the
* free fastpath may access the rtree cache before a full tsd initialization.
*/
#define RTREE_CTX_INITIALIZER {{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, \
{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2)}}
#define RTREE_CTX_INITIALIZER \
{ \
{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, { \
RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2) \
} \
}
typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
struct rtree_ctx_cache_elm_s {
uintptr_t leafkey;
rtree_leaf_elm_t *leaf;
uintptr_t leafkey;
rtree_leaf_elm_t *leaf;
};
typedef struct rtree_ctx_s rtree_ctx_t;
struct rtree_ctx_s {
/* Direct mapped cache. */
rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
/* L2 LRU cache. */
rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
};
void rtree_ctx_data_init(rtree_ctx_t *ctx);

View file

@ -7,8 +7,8 @@
#define SAFETY_CHECK_DOUBLE_FREE_MAX_SCAN_DEFAULT 32
void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
size_t true_size, size_t input_size);
void safety_check_fail_sized_dealloc(
bool current_dealloc, const void *ptr, size_t true_size, size_t input_size);
void safety_check_fail(const char *format, ...);
typedef void (*safety_check_abort_hook_t)(const char *message);
@ -16,7 +16,7 @@ typedef void (*safety_check_abort_hook_t)(const char *message);
/* Can set to NULL for a default. */
void safety_check_set_abort(safety_check_abort_hook_t abort_fn);
#define REDZONE_SIZE ((size_t) 32)
#define REDZONE_SIZE ((size_t)32)
#define REDZONE_FILL_VALUE 0xBC
/*
@ -27,9 +27,10 @@ void safety_check_set_abort(safety_check_abort_hook_t abort_fn);
*/
JEMALLOC_ALWAYS_INLINE const unsigned char *
compute_redzone_end(const void *_ptr, size_t usize, size_t bumped_usize) {
const unsigned char *ptr = (const unsigned char *) _ptr;
const unsigned char *redzone_end = usize + REDZONE_SIZE < bumped_usize ?
&ptr[usize + REDZONE_SIZE] : &ptr[bumped_usize];
const unsigned char *ptr = (const unsigned char *)_ptr;
const unsigned char *redzone_end = usize + REDZONE_SIZE < bumped_usize
? &ptr[usize + REDZONE_SIZE]
: &ptr[bumped_usize];
const unsigned char *page_end = (const unsigned char *)
ALIGNMENT_ADDR2CEILING(&ptr[usize], os_page);
return redzone_end < page_end ? redzone_end : page_end;
@ -38,8 +39,8 @@ compute_redzone_end(const void *_ptr, size_t usize, size_t bumped_usize) {
JEMALLOC_ALWAYS_INLINE void
safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
assert(usize <= bumped_usize);
const unsigned char *redzone_end =
compute_redzone_end(ptr, usize, bumped_usize);
const unsigned char *redzone_end = compute_redzone_end(
ptr, usize, bumped_usize);
for (unsigned char *curr = &((unsigned char *)ptr)[usize];
curr < redzone_end; curr++) {
*curr = REDZONE_FILL_VALUE;
@ -47,11 +48,11 @@ safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
}
JEMALLOC_ALWAYS_INLINE void
safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize)
{
const unsigned char *redzone_end =
compute_redzone_end(ptr, usize, bumped_usize);
for (const unsigned char *curr= &((const unsigned char *)ptr)[usize];
safety_check_verify_redzone(
const void *ptr, size_t usize, size_t bumped_usize) {
const unsigned char *redzone_end = compute_redzone_end(
ptr, usize, bumped_usize);
for (const unsigned char *curr = &((const unsigned char *)ptr)[usize];
curr < redzone_end; curr++) {
if (unlikely(*curr != REDZONE_FILL_VALUE)) {
safety_check_fail("Use after free error\n");

View file

@ -32,22 +32,22 @@ void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
* Unguard the extent, but don't modify emap boundaries. Must be called on an
* extent that has been erased from emap and shouldn't be placed back.
*/
void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks,
edata_t *edata, emap_t *emap);
void san_unguard_pages_pre_destroy(
tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap);
void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize);
void tsd_san_init(tsd_t *tsd);
void san_init(ssize_t lg_san_uaf_align);
static inline void
san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap, bool remap) {
san_guard_pages_two_sided(
tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap, bool remap) {
san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap);
}
static inline void
san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap) {
san_unguard_pages_two_sided(
tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) {
san_unguard_pages(tsdn, ehooks, edata, emap, true, true);
}
@ -83,14 +83,14 @@ san_guard_enabled(void) {
}
static inline bool
san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
size_t alignment) {
if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) ||
tsdn_null(tsdn)) {
san_large_extent_decide_guard(
tsdn_t *tsdn, ehooks_t *ehooks, size_t size, size_t alignment) {
if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks)
|| tsdn_null(tsdn)) {
return false;
}
tsd_t *tsd = tsdn_tsd(tsdn);
tsd_t *tsd = tsdn_tsd(tsdn);
uint64_t n = tsd_san_extents_until_guard_large_get(tsd);
assert(n >= 1);
if (n > 1) {
@ -101,10 +101,10 @@ san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
*tsd_san_extents_until_guard_largep_get(tsd) = n - 1;
}
if (n == 1 && (alignment <= PAGE) &&
(san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) {
*tsd_san_extents_until_guard_largep_get(tsd) =
opt_san_guard_large;
if (n == 1 && (alignment <= PAGE)
&& (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) {
*tsd_san_extents_until_guard_largep_get(
tsd) = opt_san_guard_large;
return true;
} else {
assert(tsd_san_extents_until_guard_large_get(tsd) >= 1);
@ -114,17 +114,17 @@ san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
static inline bool
san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) ||
tsdn_null(tsdn)) {
if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks)
|| tsdn_null(tsdn)) {
return false;
}
tsd_t *tsd = tsdn_tsd(tsdn);
tsd_t *tsd = tsdn_tsd(tsdn);
uint64_t n = tsd_san_extents_until_guard_small_get(tsd);
assert(n >= 1);
if (n == 1) {
*tsd_san_extents_until_guard_smallp_get(tsd) =
opt_san_guard_small;
*tsd_san_extents_until_guard_smallp_get(
tsd) = opt_san_guard_small;
return true;
} else {
*tsd_san_extents_until_guard_smallp_get(tsd) = n - 1;
@ -134,8 +134,8 @@ san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
}
static inline void
san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
void **last) {
san_junk_ptr_locations(
void *ptr, size_t usize, void **first, void **mid, void **last) {
size_t ptr_sz = sizeof(void *);
*first = ptr;
@ -184,8 +184,8 @@ static inline bool
san_uaf_detection_enabled(void) {
bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1);
if (config_uaf_detection && ret) {
assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 <<
opt_lg_san_uaf_align) - 1);
assert(san_cache_bin_nonfast_mask
== ((uintptr_t)1 << opt_lg_san_uaf_align) - 1);
}
return ret;

View file

@ -12,7 +12,7 @@
extern bool opt_retain;
typedef struct ehooks_s ehooks_t;
typedef struct pac_s pac_t;
typedef struct pac_s pac_t;
typedef struct san_bump_alloc_s san_bump_alloc_t;
struct san_bump_alloc_s {
@ -36,7 +36,7 @@ san_bump_enabled(void) {
}
static inline bool
san_bump_alloc_init(san_bump_alloc_t* sba) {
san_bump_alloc_init(san_bump_alloc_t *sba) {
bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
if (err) {
@ -47,8 +47,7 @@ san_bump_alloc_init(san_bump_alloc_t* sba) {
return false;
}
edata_t *
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks,
size_t size, bool zero);
edata_t *san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
ehooks_t *ehooks, size_t size, bool zero);
#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */

View file

@ -174,7 +174,7 @@
#if SC_LG_TINY_MIN == 0
/* The div module doesn't support division by 1, which this would require. */
#error "Unsupported LG_TINY_MIN"
# error "Unsupported LG_TINY_MIN"
#endif
/*
@ -194,8 +194,8 @@
* We could probably save some space in arenas by capping this at LG_VADDR size.
*/
#define SC_LG_BASE_MAX (SC_PTR_BITS - 2)
#define SC_NREGULAR (SC_NGROUP * \
(SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NREGULAR \
(SC_NGROUP * (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)
/*
@ -222,29 +222,29 @@
*
* This gives us the quantity we seek.
*/
#define SC_NPSIZES ( \
SC_NGROUP \
+ (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP \
+ SC_NGROUP - 1)
#define SC_NPSIZES \
(SC_NGROUP + (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP \
+ SC_NGROUP - 1)
/*
* We declare a size class is binnable if size < page size * group. Or, in other
* words, lg(size) < lg(page size) + lg(group size).
*/
#define SC_NBINS ( \
/* Sub-regular size classes. */ \
SC_NTINY + SC_NPSEUDO \
/* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \
+ SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \
/* Last SC of the last group hits the bound exactly; exclude it. */ \
- 1)
#define SC_NBINS \
(/* Sub-regular size classes. */ \
SC_NTINY \
+ SC_NPSEUDO /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \
+ SC_NGROUP \
* (LG_PAGE + SC_LG_NGROUP \
- SC_LG_FIRST_REGULAR_BASE) /* Last SC of the last group hits the bound exactly; exclude it. */ \
- 1)
/*
* The size2index_tab lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes.
*/
#if (SC_NBINS > 256)
# error "Too many small size classes"
# error "Too many small size classes"
#endif
/* The largest size class in the lookup table, and its binary log. */
@ -256,12 +256,12 @@
#define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1))
/* The largest size class allocated out of a slab. */
#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
+ (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
#define SC_SMALL_MAXCLASS \
(SC_SMALL_MAX_BASE + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
/* The fastpath assumes all lookup-able sizes are small. */
#if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS)
# error "Lookup table sizes must be small"
# error "Lookup table sizes must be small"
#endif
/* The smallest size class not allocated out of a slab. */
@ -277,13 +277,13 @@
/* Maximum number of regions in one slab. */
#ifndef CONFIG_LG_SLAB_MAXREGS
# define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
# define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#else
# if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN)
# error "Unsupported SC_LG_SLAB_MAXREGS"
# else
# define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS
# endif
# if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN)
# error "Unsupported SC_LG_SLAB_MAXREGS"
# else
# define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS
# endif
#endif
/*
@ -364,13 +364,13 @@ struct sc_data_s {
};
size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);
void sc_data_init(sc_data_t *data);
void sc_data_init(sc_data_t *data);
/*
* Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
* Otherwise, does its best to accommodate the request.
*/
void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
int pgs);
void sc_data_update_slab_size(
sc_data_t *data, size_t begin, size_t end, int pgs);
void sc_boot(sc_data_t *data);
#endif /* JEMALLOC_INTERNAL_SC_H */

View file

@ -59,7 +59,7 @@ struct sec_bin_s {
* stats; rather, it allows us to quickly determine the change in the
* centralized counter when flushing.
*/
size_t bytes_cur;
size_t bytes_cur;
edata_list_active_t freelist;
};
@ -80,7 +80,7 @@ struct sec_shard_s {
* that we won't go down these pathways very often after custom extent
* hooks are installed.
*/
bool enabled;
bool enabled;
sec_bin_t *bins;
/* Number of bytes in all bins in the shard. */
size_t bytes_cur;
@ -90,12 +90,12 @@ struct sec_shard_s {
typedef struct sec_s sec_t;
struct sec_s {
pai_t pai;
pai_t pai;
pai_t *fallback;
sec_opts_t opts;
sec_opts_t opts;
sec_shard_t *shards;
pszind_t npsizes;
pszind_t npsizes;
};
bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
@ -110,8 +110,8 @@ void sec_disable(tsdn_t *tsdn, sec_t *sec);
* split), which simplifies the stats management.
*/
void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats);
void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
mutex_prof_data_t *mutex_prof_data);
void sec_mutex_stats_read(
tsdn_t *tsdn, sec_t *sec, mutex_prof_data_t *mutex_prof_data);
/*
* We use the arena lock ordering; these are acquired in phase 2 of forking, but

View file

@ -44,18 +44,14 @@ struct sec_opts_s {
size_t batch_fill_extra;
};
#define SEC_OPTS_DEFAULT { \
/* nshards */ \
4, \
/* max_alloc */ \
(32 * 1024) < PAGE ? PAGE : (32 * 1024), \
/* max_bytes */ \
256 * 1024, \
/* bytes_after_flush */ \
128 * 1024, \
/* batch_fill_extra */ \
0 \
}
#define SEC_OPTS_DEFAULT \
{ \
/* nshards */ \
4, /* max_alloc */ \
(32 * 1024) < PAGE ? PAGE : (32 * 1024), /* max_bytes */ \
256 * 1024, /* bytes_after_flush */ \
128 * 1024, /* batch_fill_extra */ \
0 \
}
#endif /* JEMALLOC_INTERNAL_SEC_OPTS_H */

View file

@ -23,210 +23,210 @@
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
/* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
/*
 * Precomputed lookup table for the "smoother" smoothstep variant
 * (Perlin's smootherstep): y = 6x^5 - 15x^4 + 10x^3, sampled at
 * SMOOTHSTEP_NSTEPS equally spaced points x = step / SMOOTHSTEP_NSTEPS.
 * Each h is y in fixed point with SMOOTHSTEP_BFP fractional binary digits,
 * truncated: h = floor(y * 2^SMOOTHSTEP_BFP). E.g. step 100 has x = 0.5,
 * y = 0.5, h = 0x800000; step 200 has y = 1.0, h = 0x1000000 = 2^24.
 *
 * Callers define their own STEP(step, h, x, y) macro and then expand
 * SMOOTHSTEP to iterate over every table row.
 *
 * NOTE(review): this table is presumably emitted by a generator script —
 * regenerate rather than hand-editing the constants; verify against the
 * project's smoothstep generator.
 */
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
 /* STEP(step, h, x, y) */ \
 STEP(1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
 STEP(2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
 STEP(3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
 STEP(4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
 STEP(5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
 STEP(6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
 STEP(7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
 STEP(8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
 STEP(9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
 STEP(10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
 STEP(11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
 STEP(12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
 STEP(13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
 STEP(14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
 STEP(15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
 STEP(16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
 STEP(17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
 STEP(18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
 STEP(19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
 STEP(20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
 STEP(21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
 STEP(22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
 STEP(23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
 STEP(24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
 STEP(25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
 STEP(26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
 STEP(27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
 STEP(28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
 STEP(29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
 STEP(30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
 STEP(31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
 STEP(32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
 STEP(33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
 STEP(34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
 STEP(35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
 STEP(36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
 STEP(37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
 STEP(38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
 STEP(39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
 STEP(40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
 STEP(41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
 STEP(42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
 STEP(43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
 STEP(44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
 STEP(45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
 STEP(46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
 STEP(47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
 STEP(48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
 STEP(49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
 STEP(50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
 STEP(51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
 STEP(52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
 STEP(53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
 STEP(54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
 STEP(55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
 STEP(56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
 STEP(57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
 STEP(58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
 STEP(59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
 STEP(60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
 STEP(61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
 STEP(62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
 STEP(63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
 STEP(64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
 STEP(65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
 STEP(66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
 STEP(67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
 STEP(68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
 STEP(69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
 STEP(70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
 STEP(71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
 STEP(72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
 STEP(73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
 STEP(74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
 STEP(75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
 STEP(76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
 STEP(77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
 STEP(78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
 STEP(79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
 STEP(80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
 STEP(81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
 STEP(82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
 STEP(83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
 STEP(84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
 STEP(85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
 STEP(86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
 STEP(87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
 STEP(88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
 STEP(89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
 STEP(90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
 STEP(91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
 STEP(92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
 STEP(93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
 STEP(94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
 STEP(95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
 STEP(96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
 STEP(97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
 STEP(98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
 STEP(99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
 STEP(100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
 STEP(101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
 STEP(102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
 STEP(103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
 STEP(104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
 STEP(105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
 STEP(106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
 STEP(107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
 STEP(108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
 STEP(109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
 STEP(110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
 STEP(111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
 STEP(112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
 STEP(113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
 STEP(114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
 STEP(115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
 STEP(116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
 STEP(117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
 STEP(118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
 STEP(119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
 STEP(120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
 STEP(121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
 STEP(122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
 STEP(123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
 STEP(124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
 STEP(125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
 STEP(126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
 STEP(127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
 STEP(128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
 STEP(129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
 STEP(130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
 STEP(131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
 STEP(132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
 STEP(133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
 STEP(134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
 STEP(135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
 STEP(136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
 STEP(137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
 STEP(138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
 STEP(139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
 STEP(140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
 STEP(141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
 STEP(142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
 STEP(143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
 STEP(144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
 STEP(145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
 STEP(146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
 STEP(147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
 STEP(148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
 STEP(149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
 STEP(150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
 STEP(151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
 STEP(152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
 STEP(153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
 STEP(154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
 STEP(155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
 STEP(156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
 STEP(157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
 STEP(158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
 STEP(159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
 STEP(160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
 STEP(161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
 STEP(162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
 STEP(163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
 STEP(164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
 STEP(165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
 STEP(166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
 STEP(167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
 STEP(168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
 STEP(169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
 STEP(170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
 STEP(171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
 STEP(172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
 STEP(173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
 STEP(174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
 STEP(175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
 STEP(176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
 STEP(177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
 STEP(178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
 STEP(179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
 STEP(180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
 STEP(181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
 STEP(182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
 STEP(183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
 STEP(184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
 STEP(185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
 STEP(186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
 STEP(187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
 STEP(188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
 STEP(189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
 STEP(190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
 STEP(191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
 STEP(192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
 STEP(193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
 STEP(194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
 STEP(195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
 STEP(196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
 STEP(197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
 STEP(198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
 STEP(199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
 STEP(200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000)
#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */

View file

@ -3,7 +3,8 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#define SPIN_INITIALIZER {0U}
#define SPIN_INITIALIZER \
{ 0U }
typedef struct {
unsigned iteration;
@ -11,12 +12,12 @@ typedef struct {
static inline void
spin_cpu_spinwait(void) {
# if HAVE_CPU_SPINWAIT
#if HAVE_CPU_SPINWAIT
CPU_SPINWAIT;
# else
#else
volatile int x = 0;
x = x;
# endif
#endif
}
static inline void

View file

@ -7,32 +7,32 @@
#include "jemalloc/internal/tsd_types.h"
/* OPTION(opt, var_name, default, set_value_to) */
#define STATS_PRINT_OPTIONS \
OPTION('J', json, false, true) \
OPTION('g', general, true, false) \
OPTION('m', merged, config_stats, false) \
OPTION('d', destroyed, config_stats, false) \
OPTION('a', unmerged, config_stats, false) \
OPTION('b', bins, true, false) \
OPTION('l', large, true, false) \
OPTION('x', mutex, true, false) \
OPTION('e', extents, true, false) \
OPTION('h', hpa, config_stats, false)
#define STATS_PRINT_OPTIONS \
OPTION('J', json, false, true) \
OPTION('g', general, true, false) \
OPTION('m', merged, config_stats, false) \
OPTION('d', destroyed, config_stats, false) \
OPTION('a', unmerged, config_stats, false) \
OPTION('b', bins, true, false) \
OPTION('l', large, true, false) \
OPTION('x', mutex, true, false) \
OPTION('e', extents, true, false) \
OPTION('h', hpa, config_stats, false)
enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
STATS_PRINT_OPTIONS
STATS_PRINT_OPTIONS
#undef OPTION
stats_print_tot_num_options
stats_print_tot_num_options
};
/* Options for stats_print. */
extern bool opt_stats_print;
extern char opt_stats_print_opts[stats_print_tot_num_options+1];
extern char opt_stats_print_opts[stats_print_tot_num_options + 1];
/* Utilities for stats_interval. */
extern int64_t opt_stats_interval;
extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
extern char opt_stats_interval_opts[stats_print_tot_num_options + 1];
#define STATS_INTERVAL_DEFAULT -1
/*

View file

@ -76,8 +76,9 @@ sz_psz2ind(size_t psz) {
* SC_NGROUP. off_to_first_ps_rg begins from 1, instead of 0. e.g.
* off_to_first_ps_rg is 1 when psz is (PAGE * SC_NGROUP + 1).
*/
pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) ?
0 : x - (SC_LG_NGROUP + LG_PAGE);
pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE)
? 0
: x - (SC_LG_NGROUP + LG_PAGE);
/*
* Same as sc_s::lg_delta.
@ -85,8 +86,9 @@ sz_psz2ind(size_t psz) {
* for each increase in offset, it's multiplied by two.
* Therefore, lg_delta = LG_PAGE + (off_to_first_ps_rg - 1).
*/
pszind_t lg_delta = (off_to_first_ps_rg == 0) ?
LG_PAGE : LG_PAGE + (off_to_first_ps_rg - 1);
pszind_t lg_delta = (off_to_first_ps_rg == 0)
? LG_PAGE
: LG_PAGE + (off_to_first_ps_rg - 1);
/*
* Let's write psz in binary, e.g. 0011 for 0x3, 0111 for 0x7.
@ -118,13 +120,13 @@ sz_pind2sz_compute(pszind_t pind) {
size_t grp = pind >> SC_LG_NGROUP;
size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp)
size_t grp_size_mask = ~((!!grp) - 1);
size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP - 1))) << grp)
& grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_PAGE-1);
size_t mod_size = (mod+1) << lg_delta;
size_t lg_delta = shift + (LG_PAGE - 1);
size_t mod_size = (mod + 1) << lg_delta;
size_t sz = grp_size + mod_size;
return sz;
@ -148,9 +150,10 @@ sz_psz2u(size_t psz) {
if (unlikely(psz > SC_LARGE_MAXCLASS)) {
return SC_LARGE_MAXCLASS + PAGE;
}
size_t x = lg_floor((psz<<1)-1);
size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
LG_PAGE : x - SC_LG_NGROUP - 1;
size_t x = lg_floor((psz << 1) - 1);
size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1)
? LG_PAGE
: x - SC_LG_NGROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (psz + delta_mask) & ~delta_mask;
@ -174,17 +177,19 @@ sz_size2index_compute_inline(size_t size) {
}
#endif
{
szind_t x = lg_floor((size<<1)-1);
szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 :
x - (SC_LG_NGROUP + LG_QUANTUM);
szind_t x = lg_floor((size << 1) - 1);
szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM)
? 0
: x - (SC_LG_NGROUP + LG_QUANTUM);
szind_t grp = shift << SC_LG_NGROUP;
szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - SC_LG_NGROUP - 1;
? LG_QUANTUM
: x - SC_LG_NGROUP - 1;
size_t delta_inverse_mask = ZU(-1) << lg_delta;
szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << SC_LG_NGROUP) - 1);
size_t delta_inverse_mask = ZU(-1) << lg_delta;
szind_t mod = ((((size - 1) & delta_inverse_mask) >> lg_delta))
& ((ZU(1) << SC_LG_NGROUP) - 1);
szind_t index = SC_NTINY + grp + mod;
return index;
@ -228,16 +233,16 @@ sz_index2size_compute_inline(szind_t index) {
{
size_t reduced_index = index - SC_NTINY;
size_t grp = reduced_index >> SC_LG_NGROUP;
size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) -
1);
size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) - 1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_QUANTUM +
(SC_LG_NGROUP-1))) << grp) & grp_size_mask;
size_t grp_size_mask = ~((!!grp) - 1);
size_t grp_size = ((ZU(1) << (LG_QUANTUM + (SC_LG_NGROUP - 1)))
<< grp)
& grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_QUANTUM-1);
size_t mod_size = (mod+1) << lg_delta;
size_t lg_delta = shift + (LG_QUANTUM - 1);
size_t mod_size = (mod + 1) << lg_delta;
size_t usize = grp_size + mod_size;
return usize;
@ -269,8 +274,8 @@ sz_index2size_unsafe(szind_t index) {
JEMALLOC_ALWAYS_INLINE size_t
sz_index2size(szind_t index) {
assert(!sz_large_size_classes_disabled() ||
index <= sz_size2index(USIZE_GROW_SLOW_THRESHOLD));
assert(!sz_large_size_classes_disabled()
|| index <= sz_size2index(USIZE_GROW_SLOW_THRESHOLD));
size_t size = sz_index2size_unsafe(index);
/*
* With large size classes disabled, the usize above
@ -285,8 +290,8 @@ sz_index2size(szind_t index) {
* the size is no larger than USIZE_GROW_SLOW_THRESHOLD here
* instead of SC_LARGE_MINCLASS.
*/
assert(!sz_large_size_classes_disabled() ||
size <= USIZE_GROW_SLOW_THRESHOLD);
assert(!sz_large_size_classes_disabled()
|| size <= USIZE_GROW_SLOW_THRESHOLD);
return size;
}
@ -309,9 +314,10 @@ sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) {
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_compute_using_delta(size_t size) {
size_t x = lg_floor((size<<1)-1);
size_t x = lg_floor((size << 1) - 1);
size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - SC_LG_NGROUP - 1;
? LG_QUANTUM
: x - SC_LG_NGROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (size + delta_mask) & ~delta_mask;
@ -331,8 +337,8 @@ sz_s2u_compute(size_t size) {
if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin)
: (ZU(1) << lg_ceil));
}
#endif
if (size <= SC_SMALL_MAXCLASS || !sz_large_size_classes_disabled()) {

View file

@ -8,15 +8,15 @@
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/thread_event_registry.h"
extern bool opt_tcache;
extern size_t opt_tcache_max;
extern ssize_t opt_lg_tcache_nslots_mul;
extern bool opt_tcache;
extern size_t opt_tcache_max;
extern ssize_t opt_lg_tcache_nslots_mul;
extern unsigned opt_tcache_nslots_small_min;
extern unsigned opt_tcache_nslots_small_max;
extern unsigned opt_tcache_nslots_large;
extern ssize_t opt_lg_tcache_shift;
extern size_t opt_tcache_gc_incr_bytes;
extern size_t opt_tcache_gc_delay_bytes;
extern ssize_t opt_lg_tcache_shift;
extern size_t opt_tcache_gc_incr_bytes;
extern size_t opt_tcache_gc_delay_bytes;
extern unsigned opt_lg_tcache_flush_small_div;
extern unsigned opt_lg_tcache_flush_large_div;
@ -27,14 +27,14 @@ extern unsigned opt_lg_tcache_flush_large_div;
* it should not be changed on the fly. To change the number of tcache bins
* in use, refer to tcache_nbins of each tcache.
*/
extern unsigned global_do_not_change_tcache_nbins;
extern unsigned global_do_not_change_tcache_nbins;
/*
* Maximum cached size class. Same as above, this is only used during threads
* initialization and should not be changed. To change the maximum cached size
* class, refer to tcache_max of each tcache.
*/
extern size_t global_do_not_change_tcache_maxclass;
extern size_t global_do_not_change_tcache_maxclass;
/*
* Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
@ -44,11 +44,11 @@ extern size_t global_do_not_change_tcache_maxclass;
* touched. This allows the entire array to be allocated the first time an
* explicit tcache is created without a disproportionate impact on memory usage.
*/
extern tcaches_t *tcaches;
extern tcaches_t *tcaches;
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
cache_bin_t *cache_bin, szind_t binind, bool *tcache_success);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
cache_bin_t *cache_bin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache,
cache_bin_t *cache_bin, szind_t binind, unsigned rem);
@ -56,23 +56,23 @@ void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache,
cache_bin_t *cache_bin, szind_t binind, unsigned rem);
void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache,
cache_bin_t *cache_bin, szind_t binind, bool is_small);
bool tcache_bin_info_default_init(const char *bin_settings_segment_cur,
size_t len_left);
bool tcache_bin_info_default_init(
const char *bin_settings_segment_cur, size_t len_left);
bool tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len);
bool tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size,
cache_bin_sz_t *ncached_max);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena);
bool tcache_bin_ncached_max_read(
tsd_t *tsd, size_t bin_size, cache_bin_sz_t *ncached_max);
void tcache_arena_reassociate(
tsdn_t *tsdn, tcache_slow_t *tcache_slow, tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
void thread_tcache_max_set(tsd_t *tsd, size_t tcache_max);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn, base_t *base);
void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena);
void thread_tcache_max_set(tsd_t *tsd, size_t tcache_max);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn, base_t *base);
void tcache_arena_associate(
tsdn_t *tsdn, tcache_slow_t *tcache_slow, tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);

View file

@ -42,8 +42,8 @@ tcache_max_set(tcache_slow_t *tcache_slow, size_t tcache_max) {
}
static inline void
tcache_bin_settings_backup(tcache_t *tcache,
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
tcache_bin_settings_backup(
tcache_t *tcache, cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
for (unsigned i = 0; i < TCACHE_NBINS_MAX; i++) {
cache_bin_info_init(&tcache_bin_info[i],
cache_bin_ncached_max_get_unsafe(&tcache->bins[i]));
@ -51,8 +51,7 @@ tcache_bin_settings_backup(tcache_t *tcache,
}
JEMALLOC_ALWAYS_INLINE bool
tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
tcache_slow_t *tcache_slow) {
tcache_bin_disabled(szind_t ind, cache_bin_t *bin, tcache_slow_t *tcache_slow) {
assert(bin != NULL);
assert(ind < TCACHE_NBINS_MAX);
bool disabled = cache_bin_disabled(bin);
@ -66,7 +65,7 @@ tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
* ind >= nbins or ncached_max == 0. If a bin is enabled, it has
* ind < nbins and ncached_max > 0.
*/
unsigned nbins = tcache_nbins_get(tcache_slow);
unsigned nbins = tcache_nbins_get(tcache_slow);
cache_bin_sz_t ncached_max = cache_bin_ncached_max_get_unsafe(bin);
if (ind >= nbins) {
assert(disabled);
@ -88,10 +87,10 @@ tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t binind, bool zero, bool slow_path) {
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path) {
void *ret;
bool tcache_success;
bool tcache_success;
assert(binind < SC_NBINS);
cache_bin_t *bin = &tcache->bins[binind];
@ -103,8 +102,8 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
if (unlikely(arena == NULL)) {
return NULL;
}
if (unlikely(tcache_bin_disabled(binind, bin,
tcache->tcache_slow))) {
if (unlikely(tcache_bin_disabled(
binind, bin, tcache->tcache_slow))) {
/* stats and zero are handled directly by the arena. */
return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
binind, zero, /* slab */ true);
@ -112,8 +111,8 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
tcache_bin_flush_stashed(tsd, tcache, bin, binind,
/* is_small */ true);
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
bin, binind, &tcache_hard_success);
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, bin,
binind, &tcache_hard_success);
if (tcache_hard_success == false) {
return NULL;
}
@ -135,11 +134,11 @@ JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path) {
void *ret;
bool tcache_success;
bool tcache_success;
cache_bin_t *bin = &tcache->bins[binind];
assert(binind >= SC_NBINS &&
!tcache_bin_disabled(binind, bin, tcache->tcache_slow));
assert(binind >= SC_NBINS
&& !tcache_bin_disabled(binind, bin, tcache->tcache_slow));
ret = cache_bin_alloc(bin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
@ -174,8 +173,8 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path) {
tcache_dalloc_small(
tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) {
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);
cache_bin_t *bin = &tcache->bins[binind];
@ -195,13 +194,13 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
}
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
if (unlikely(tcache_bin_disabled(binind, bin,
tcache->tcache_slow))) {
if (unlikely(tcache_bin_disabled(
binind, bin, tcache->tcache_slow))) {
arena_dalloc_small(tsd_tsdn(tsd), ptr);
return;
}
cache_bin_sz_t max = cache_bin_ncached_max_get(bin);
unsigned remain = max >> opt_lg_tcache_flush_small_div;
unsigned remain = max >> opt_lg_tcache_flush_small_div;
tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
bool ret = cache_bin_dalloc_easy(bin, ptr);
assert(ret);
@ -209,19 +208,18 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path) {
tcache_dalloc_large(
tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) {
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <=
tcache_max_get(tcache->tcache_slow));
assert(!tcache_bin_disabled(binind, &tcache->bins[binind],
tcache->tcache_slow));
assert(tcache_salloc(tsd_tsdn(tsd), ptr)
<= tcache_max_get(tcache->tcache_slow));
assert(!tcache_bin_disabled(
binind, &tcache->bins[binind], tcache->tcache_slow));
cache_bin_t *bin = &tcache->bins[binind];
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
unsigned remain = cache_bin_ncached_max_get(bin) >>
opt_lg_tcache_flush_large_div;
unsigned remain = cache_bin_ncached_max_get(bin)
>> opt_lg_tcache_flush_large_div;
tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
bool ret = cache_bin_dalloc_easy(bin, ptr);
assert(ret);

View file

@ -30,45 +30,45 @@ struct tcache_slow_s {
cache_bin_array_descriptor_t cache_bin_array_descriptor;
/* The arena this tcache is associated with. */
arena_t *arena;
arena_t *arena;
/* The number of bins activated in the tcache. */
unsigned tcache_nbins;
unsigned tcache_nbins;
/* Last time GC has been performed. */
nstime_t last_gc_time;
nstime_t last_gc_time;
/* Next bin to GC. */
szind_t next_gc_bin;
szind_t next_gc_bin_small;
szind_t next_gc_bin_large;
szind_t next_gc_bin;
szind_t next_gc_bin_small;
szind_t next_gc_bin_large;
/* For small bins, help determine how many items to fill at a time. */
cache_bin_fill_ctl_t bin_fill_ctl_do_not_access_directly[SC_NBINS];
cache_bin_fill_ctl_t bin_fill_ctl_do_not_access_directly[SC_NBINS];
/* For small bins, whether has been refilled since last GC. */
bool bin_refilled[SC_NBINS];
bool bin_refilled[SC_NBINS];
/*
* For small bins, the number of items we can pretend to flush before
* actually flushing.
*/
uint8_t bin_flush_delay_items[SC_NBINS];
uint8_t bin_flush_delay_items[SC_NBINS];
/*
* The start of the allocation containing the dynamic allocation for
* either the cache bins alone, or the cache bin memory as well as this
* tcache_slow_t and its associated tcache_t.
*/
void *dyn_alloc;
void *dyn_alloc;
/* The associated bins. */
tcache_t *tcache;
tcache_t *tcache;
};
struct tcache_s {
tcache_slow_t *tcache_slow;
cache_bin_t bins[TCACHE_NBINS_MAX];
tcache_slow_t *tcache_slow;
cache_bin_t bins[TCACHE_NBINS_MAX];
};
/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
union {
tcache_t *tcache;
tcaches_t *next;
tcache_t *tcache;
tcaches_t *next;
};
};

View file

@ -5,12 +5,16 @@
#include "jemalloc/internal/sc.h"
typedef struct tcache_slow_s tcache_slow_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
/* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}
#define TCACHE_SLOW_ZERO_INITIALIZER {{0}}
#define TCACHE_ZERO_INITIALIZER \
{ 0 }
#define TCACHE_SLOW_ZERO_INITIALIZER \
{ \
{ 0 } \
}
/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER false
@ -21,9 +25,11 @@ typedef struct tcaches_s tcaches_t;
#define TCACHE_LG_MAXCLASS_LIMIT LG_USIZE_GROW_SLOW_THRESHOLD
#define TCACHE_MAXCLASS_LIMIT ((size_t)1 << TCACHE_LG_MAXCLASS_LIMIT)
#define TCACHE_NBINS_MAX (SC_NBINS + SC_NGROUP * \
(TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1)
#define TCACHE_GC_NEIGHBOR_LIMIT ((uintptr_t)1 << 21) /* 2M */
#define TCACHE_NBINS_MAX \
(SC_NBINS \
+ SC_NGROUP * (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) \
+ 1)
#define TCACHE_GC_NEIGHBOR_LIMIT ((uintptr_t)1 << 21) /* 2M */
#define TCACHE_GC_INTERVAL_NS ((uint64_t)10 * KQU(1000000)) /* 10ms */
#define TCACHE_GC_SMALL_NBINS_MAX ((SC_NBINS > 8) ? (SC_NBINS >> 3) : 1)
#define TCACHE_GC_LARGE_NBINS_MAX 1

Some files were not shown because too many files have changed in this diff Show more