Move bin inline functions from arena_inlines_b.h to bin_inlines.h

This is a continuation of my previous clean-up change, now focusing on
the inline functions defined in header files.
Carl Shapiro 2026-02-23 23:31:27 -08:00 committed by Guangli Dai
parent 1cc563f531
commit 0ac9380cf1
4 changed files with 131 additions and 118 deletions
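
The moved helpers keep their begin/step/finish shape: the caller takes the bin lock, runs one step per freed pointer, and flushes the batched stats once at the end. As a minimal sketch of the new calling convention, distilled from the arena_dalloc_bin hunk further down (tsdn, arena, bin, binind, edata, and ptr are assumed to be set up as in that function; the real tcache-flush path loops over many pointers):

	malloc_mutex_lock(tsdn, &bin->lock);
	bin_dalloc_locked_info_t info;
	bin_dalloc_locked_begin(&info, binind);
	/* One step per freed pointer; true means the slab became empty. */
	bool dalloc_slab = bin_dalloc_locked_step(
	    tsdn, arena_is_auto(arena), bin, &info, binind, edata, ptr);
	/* Flush the stats counts accumulated in registers, still locked. */
	bin_dalloc_locked_finish(tsdn, bin, &info);
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (dalloc_slab) {
		arena_slab_dalloc(tsdn, arena, edata);
	}

Note that the step now takes a bool is_auto instead of the arena_t * itself, which is what lets these helpers live in bin_inlines.h without depending on arena internals.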

include/jemalloc/internal/arena_inlines_b.h

@@ -4,6 +4,7 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/arena_externs.h"
 #include "jemalloc/internal/arena_structs.h"
+#include "jemalloc/internal/bin_inlines.h"
 #include "jemalloc/internal/div.h"
 #include "jemalloc/internal/emap.h"
 #include "jemalloc/internal/jemalloc_internal_inlines_b.h"
@@ -335,29 +336,6 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
 	}
 }
-
-/* Find the region index of a pointer. */
-JEMALLOC_ALWAYS_INLINE size_t
-arena_slab_regind_impl(
-    div_info_t *div_info, szind_t binind, edata_t *slab, const void *ptr) {
-	size_t diff, regind;
-
-	/* Freeing a pointer outside the slab can cause assertion failure. */
-	assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
-	assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
-	/* Freeing an interior pointer can cause assertion failure. */
-	assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab))
-	    % (uintptr_t)bin_infos[binind].reg_size
-	    == 0);
-
-	diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
-	/* Avoid doing division with a variable divisor. */
-	regind = div_compute(div_info, diff);
-
-	assert(regind < bin_infos[binind].nregs);
-
-	return regind;
-}
-
 /* Checks whether ptr is currently active in the arena. */
 JEMALLOC_ALWAYS_INLINE bool
 arena_tcache_dalloc_small_safety_check(tsdn_t *tsdn, void *ptr) {
 	if (!config_debug) {
@@ -367,10 +345,10 @@ arena_tcache_dalloc_small_safety_check(tsdn_t *tsdn, void *ptr) {
 	szind_t binind = edata_szind_get(edata);
 	div_info_t div_info = arena_binind_div_info[binind];
 	/*
-	 * Calls the internal function arena_slab_regind_impl because the
+	 * Calls the internal function bin_slab_regind_impl because the
 	 * safety check does not require a lock.
 	 */
-	size_t regind = arena_slab_regind_impl(&div_info, binind, edata, ptr);
+	size_t regind = bin_slab_regind_impl(&div_info, binind, edata, ptr);
 	slab_data_t *slab_data = edata_slab_data_get(edata);
 	const bin_info_t *bin_info = &bin_infos[binind];
 	assert(edata_nfree_get(edata) < bin_info->nregs);
@@ -551,84 +529,6 @@ arena_cache_oblivious_randomize(
 	}
 }
-
-/*
- * The dalloc bin info contains just the information that the common paths need
- * during tcache flushes. By force-inlining these paths, and using local copies
- * of data (so that the compiler knows it's constant), we avoid a whole bunch of
- * redundant loads and stores by leaving this information in registers.
- */
-typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t;
-struct arena_dalloc_bin_locked_info_s {
-	div_info_t div_info;
-	uint32_t nregs;
-	uint64_t ndalloc;
-};
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind,
-    edata_t *slab, const void *ptr) {
-	size_t regind = arena_slab_regind_impl(
-	    &info->div_info, binind, slab, ptr);
-	return regind;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_dalloc_bin_locked_begin(
-    arena_dalloc_bin_locked_info_t *info, szind_t binind) {
-	info->div_info = arena_binind_div_info[binind];
-	info->nregs = bin_infos[binind].nregs;
-	info->ndalloc = 0;
-}
-
-/*
- * Does the deallocation work associated with freeing a single pointer (a
- * "step") in between a arena_dalloc_bin_locked begin and end call.
- *
- * Returns true if arena_slab_dalloc must be called on slab.  Doesn't do
- * stats updates, which happen during finish (this lets running counts get left
- * in a register).
- */
-JEMALLOC_ALWAYS_INLINE bool
-arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab,
-    void *ptr) {
-	const bin_info_t *bin_info = &bin_infos[binind];
-	size_t regind = arena_slab_regind(info, binind, slab, ptr);
-	slab_data_t *slab_data = edata_slab_data_get(slab);
-
-	assert(edata_nfree_get(slab) < bin_info->nregs);
-	/* Freeing an unallocated pointer can cause assertion failure. */
-	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
-
-	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
-	edata_nfree_inc(slab);
-	if (config_stats) {
-		info->ndalloc++;
-	}
-
-	unsigned nfree = edata_nfree_get(slab);
-	if (nfree == bin_info->nregs) {
-		bin_dalloc_locked_handle_newly_empty(
-		    tsdn, arena_is_auto(arena), slab, bin);
-		return true;
-	} else if (nfree == 1 && slab != bin->slabcur) {
-		bin_dalloc_locked_handle_newly_nonempty(
-		    tsdn, arena_is_auto(arena), slab, bin);
-	}
-	return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    arena_dalloc_bin_locked_info_t *info) {
-	if (config_stats) {
-		bin->stats.ndalloc += info->ndalloc;
-		assert(bin->stats.curregs >= (size_t)info->ndalloc);
-		bin->stats.curregs -= (size_t)info->ndalloc;
-	}
-}
-
 static inline bin_t *
 arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
 	bin_t *shard0 = (bin_t *)((byte_t *)arena + arena_bin_offsets[binind]);

include/jemalloc/internal/bin_inlines.h (new file)

@@ -0,0 +1,112 @@
+#ifndef JEMALLOC_INTERNAL_BIN_INLINES_H
+#define JEMALLOC_INTERNAL_BIN_INLINES_H
+
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/bin.h"
+#include "jemalloc/internal/bin_info.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/div.h"
+#include "jemalloc/internal/edata.h"
+#include "jemalloc/internal/sc.h"
+
+/*
+ * The dalloc bin info contains just the information that the common paths need
+ * during tcache flushes. By force-inlining these paths, and using local copies
+ * of data (so that the compiler knows it's constant), we avoid a whole bunch of
+ * redundant loads and stores by leaving this information in registers.
+ */
+typedef struct bin_dalloc_locked_info_s bin_dalloc_locked_info_t;
+struct bin_dalloc_locked_info_s {
+	div_info_t div_info;
+	uint32_t nregs;
+	uint64_t ndalloc;
+};
+
+/* Find the region index of a pointer within a slab. */
+JEMALLOC_ALWAYS_INLINE size_t
+bin_slab_regind_impl(
+    div_info_t *div_info, szind_t binind, edata_t *slab, const void *ptr) {
+	size_t diff, regind;
+
+	/* Freeing a pointer outside the slab can cause assertion failure. */
+	assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
+	assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
+	/* Freeing an interior pointer can cause assertion failure. */
+	assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab))
+	    % (uintptr_t)bin_infos[binind].reg_size
+	    == 0);
+
+	diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
+	/* Avoid doing division with a variable divisor. */
+	regind = div_compute(div_info, diff);
+
+	assert(regind < bin_infos[binind].nregs);
+
+	return regind;
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+bin_slab_regind(bin_dalloc_locked_info_t *info, szind_t binind,
+    edata_t *slab, const void *ptr) {
+	size_t regind = bin_slab_regind_impl(
+	    &info->div_info, binind, slab, ptr);
+	return regind;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+bin_dalloc_locked_begin(
+    bin_dalloc_locked_info_t *info, szind_t binind) {
+	info->div_info = arena_binind_div_info[binind];
+	info->nregs = bin_infos[binind].nregs;
+	info->ndalloc = 0;
+}
+
+/*
+ * Does the deallocation work associated with freeing a single pointer (a
+ * "step") in between a bin_dalloc_locked begin and end call.
+ *
+ * Returns true if arena_slab_dalloc must be called on slab.  Doesn't do
+ * stats updates, which happen during finish (this lets running counts get left
+ * in a register).
+ */
+JEMALLOC_ALWAYS_INLINE bool
+bin_dalloc_locked_step(tsdn_t *tsdn, bool is_auto, bin_t *bin,
+    bin_dalloc_locked_info_t *info, szind_t binind, edata_t *slab,
+    void *ptr) {
+	const bin_info_t *bin_info = &bin_infos[binind];
+	size_t regind = bin_slab_regind(info, binind, slab, ptr);
+	slab_data_t *slab_data = edata_slab_data_get(slab);
+
+	assert(edata_nfree_get(slab) < bin_info->nregs);
+	/* Freeing an unallocated pointer can cause assertion failure. */
+	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
+
+	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
+	edata_nfree_inc(slab);
+	if (config_stats) {
+		info->ndalloc++;
+	}
+
+	unsigned nfree = edata_nfree_get(slab);
+	if (nfree == bin_info->nregs) {
+		bin_dalloc_locked_handle_newly_empty(
+		    tsdn, is_auto, slab, bin);
+		return true;
+	} else if (nfree == 1 && slab != bin->slabcur) {
+		bin_dalloc_locked_handle_newly_nonempty(
+		    tsdn, is_auto, slab, bin);
+	}
+	return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+bin_dalloc_locked_finish(tsdn_t *tsdn, bin_t *bin,
+    bin_dalloc_locked_info_t *info) {
+	if (config_stats) {
+		bin->stats.ndalloc += info->ndalloc;
+		assert(bin->stats.curregs >= (size_t)info->ndalloc);
+		bin->stats.curregs -= (size_t)info->ndalloc;
+	}
+}
+
+#endif /* JEMALLOC_INTERNAL_BIN_INLINES_H */
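
The "Avoid doing division with a variable divisor" step above leans on div_compute from div.h, which precomputes a 32-bit magic reciprocal for each bin's reg_size so the hot path does a multiply and a shift instead of a hardware divide. Below is a self-contained sketch of that trick with hypothetical names (fixed_div_t and friends are illustrative, not jemalloc's actual div.h API); like jemalloc's version, it is only exact when the divisor evenly divides the dividend, which holds here because interior pointers are asserted away:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

/* Stand-in for div_info_t: a precomputed reciprocal of a fixed divisor d. */
typedef struct {
	uint32_t magic;	/* ceil(2^32 / d) */
	size_t d;
} fixed_div_t;

static void
fixed_div_init(fixed_div_t *div, size_t d) {
	assert(d > 1);	/* d == 1 would need magic == 2^32, which overflows. */
	div->magic = (uint32_t)((((uint64_t)1 << 32) + d - 1) / d);
	div->d = d;
}

static size_t
fixed_div_compute(const fixed_div_t *div, size_t n) {
	assert(n <= UINT32_MAX);
	/* One multiply and one shift instead of a divide instruction. */
	size_t q = (size_t)(((uint64_t)n * div->magic) >> 32);
	/* Exact whenever d divides n, as it does for in-slab region offsets. */
	assert(q * div->d == n);
	return q;
}

With reg_size as the divisor, the regind = div_compute(div_info, diff) call in bin_slab_regind_impl is this same multiply-and-shift.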

src/arena.c

@@ -1171,11 +1171,11 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
 	bin_t *bin = arena_get_bin(arena, binind, binshard);
 
 	malloc_mutex_lock(tsdn, &bin->lock);
-	arena_dalloc_bin_locked_info_t info;
-	arena_dalloc_bin_locked_begin(&info, binind);
-	bool ret = arena_dalloc_bin_locked_step(
-	    tsdn, arena, bin, &info, binind, edata, ptr);
-	arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
+	bin_dalloc_locked_info_t info;
+	bin_dalloc_locked_begin(&info, binind);
+	bool ret = bin_dalloc_locked_step(
+	    tsdn, arena_is_auto(arena), bin, &info, binind, edata, ptr);
+	bin_dalloc_locked_finish(tsdn, bin, &info);
 	malloc_mutex_unlock(tsdn, &bin->lock);
 
 	if (ret) {
@@ -1330,12 +1330,13 @@ arena_ptr_array_flush_impl_small(tsdn_t *tsdn, szind_t binind,
 		/* Next flush objects. */
 		/* Init only to avoid used-uninitialized warning. */
-		arena_dalloc_bin_locked_info_t dalloc_bin_info = {0};
-		arena_dalloc_bin_locked_begin(&dalloc_bin_info, binind);
+		bin_dalloc_locked_info_t dalloc_bin_info = {0};
+		bin_dalloc_locked_begin(&dalloc_bin_info, binind);
 		for (unsigned i = prev_flush_start; i < flush_start; i++) {
 			void *ptr = arr->ptr[i];
 			edata_t *edata = item_edata[i].edata;
-			if (arena_dalloc_bin_locked_step(tsdn, cur_arena,
+			if (bin_dalloc_locked_step(tsdn,
+			    arena_is_auto(cur_arena),
 			    cur_bin, &dalloc_bin_info, binind, edata,
 			    ptr)) {
 				dalloc_slabs[dalloc_count] = edata;
@@ -1343,8 +1344,8 @@
 			}
 		}
-		arena_dalloc_bin_locked_finish(
-		    tsdn, cur_arena, cur_bin, &dalloc_bin_info);
+		bin_dalloc_locked_finish(
+		    tsdn, cur_bin, &dalloc_bin_info);
 		malloc_mutex_unlock(tsdn, &cur_bin->lock);
 
 		arena_decay_ticks(

test/unit/slab.c

@@ -2,7 +2,7 @@
 
 #define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)
 
-TEST_BEGIN(test_arena_slab_regind) {
+TEST_BEGIN(test_bin_slab_regind) {
 	szind_t binind;
 
 	for (binind = 0; binind < SC_NBINS; binind++) {
@@ -15,13 +15,13 @@ TEST_BEGIN(test_arena_slab_regind) {
 		    false, true, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 		expect_ptr_not_null(
 		    edata_addr_get(&slab), "Unexpected malloc() failure");
-		arena_dalloc_bin_locked_info_t dalloc_info;
-		arena_dalloc_bin_locked_begin(&dalloc_info, binind);
+		bin_dalloc_locked_info_t dalloc_info;
+		bin_dalloc_locked_begin(&dalloc_info, binind);
 		for (regind = 0; regind < bin_info->nregs; regind++) {
 			void *reg = (void *)((uintptr_t)edata_addr_get(&slab)
 			    + (bin_info->reg_size * regind));
 			expect_zu_eq(
-			    arena_slab_regind(&dalloc_info, binind, &slab, reg),
+			    bin_slab_regind(&dalloc_info, binind, &slab, reg),
 			    regind,
 			    "Incorrect region index computed for size %zu",
 			    bin_info->reg_size);
@@ -33,5 +33,5 @@ TEST_END
 
 int
 main(void) {
-	return test(test_arena_slab_regind);
+	return test(test_bin_slab_regind);
 }