Hide bin slab-locality query behind arena_locality_hint

bin_t is an arena implementation detail; tcache should not reach into
it. Extract the slab-address lookup into bin.c as bin_current_slab_addr,
and expose it to tcache only through arena_locality_hint(tsdn, arena,
szind), a new arena API that composes bin_choose with
bin_current_slab_addr.
This commit is contained in:
Slobodan Predolac 2026-05-05 10:00:04 -07:00
parent e286fba00a
commit 3c1c6ae419
6 changed files with 71 additions and 23 deletions

View file

@ -111,6 +111,7 @@ arena_t *arena_choose_huge(tsd_t *tsd);
size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
void **ptrs, size_t nfill, bool zero);
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
void *arena_locality_hint(tsdn_t *tsdn, arena_t *arena, szind_t szind);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);

View file

@ -95,6 +95,9 @@ bool bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, bool is_auto,
void *bin_malloc_no_fresh_slab(tsdn_t *tsdn, bool is_auto, bin_t *bin,
szind_t binind);
/* Slab queries. */
void *bin_current_slab_addr(tsdn_t *tsdn, bin_t *bin);
/* Bin selection. */
bin_t *bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
unsigned *binshard_p);

View file

@ -211,6 +211,14 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
}
}
/*
 * Return a locality anchor for the given small size class: the base
 * address of the slab the arena would currently serve allocations from,
 * or NULL when no such slab exists.  Keeps bin_t internals out of
 * callers (notably tcache).
 */
void *
arena_locality_hint(tsdn_t *tsdn, arena_t *arena, szind_t szind) {
	/* Only small size classes are backed by bins. */
	assert(szind < SC_NBINS);

	bin_t *target_bin = bin_choose(tsdn, arena, szind, NULL);
	assert(target_bin != NULL);

	return bin_current_slab_addr(tsdn, target_bin);
}
static void
arena_background_thread_inactivity_check(
tsdn_t *tsdn, arena_t *arena, bool is_background_thread) {

View file

@ -330,3 +330,16 @@ bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
}
return arena_get_bin(arena, binind, binshard);
}
/*
 * Base address of the bin's active slab, preferring slabcur and falling
 * back to the first nonfull slab; NULL when the bin has neither.
 * Takes the bin lock for the duration of the query.
 */
void *
bin_current_slab_addr(tsdn_t *tsdn, bin_t *bin) {
	void *addr = NULL;

	malloc_mutex_lock(tsdn, &bin->lock);
	edata_t *active = bin->slabcur;
	if (active == NULL) {
		active = edata_heap_first(&bin->slabs_nonfull);
	}
	assert(active != NULL || edata_heap_empty(&bin->slabs_nonfull));
	if (active != NULL) {
		addr = edata_addr_get(active);
	}
	assert(addr != NULL || active == NULL);
	malloc_mutex_unlock(tsdn, &bin->lock);

	return addr;
}

View file

@ -203,26 +203,6 @@ tcache_gc_item_delay_compute(szind_t szind) {
return (uint8_t)item_delay;
}
/*
 * Heuristic locality probe for GC of a small bin: the address of the
 * slab the owning arena's bin would currently allocate from (slabcur if
 * present, else the first nonfull slab), or NULL if the bin has no
 * slab.  Performed under the bin lock.
 */
static inline void *
tcache_gc_small_heuristic_addr_get(
    tsd_t *tsd, tcache_slow_t *tcache_slow, szind_t szind) {
	assert(szind < SC_NBINS);
	tsdn_t *tsdn = tsd_tsdn(tsd);

	bin_t *target_bin = bin_choose(tsdn, tcache_slow->arena, szind, NULL);
	assert(target_bin != NULL);

	void *addr = NULL;
	malloc_mutex_lock(tsdn, &target_bin->lock);
	edata_t *active = target_bin->slabcur;
	if (active == NULL) {
		active = edata_heap_first(&target_bin->slabs_nonfull);
	}
	assert(active != NULL ||
	    edata_heap_empty(&target_bin->slabs_nonfull));
	if (active != NULL) {
		addr = edata_addr_get(active);
	}
	assert(addr != NULL || active == NULL);
	malloc_mutex_unlock(tsdn, &target_bin->lock);

	return addr;
}
static inline bool
tcache_gc_is_addr_remote(void *addr, uintptr_t min, uintptr_t max) {
assert(addr != NULL);
@ -417,9 +397,9 @@ tcache_gc_small(
goto label_flush;
}
/* Query arena binshard to get heuristic locality info. */
void *addr = tcache_gc_small_heuristic_addr_get(
tsd, tcache_slow, szind);
/* Query arena for a locality anchor (small-bin slab address). */
void *addr = arena_locality_hint(tsd_tsdn(tsd), tcache_slow->arena,
szind);
if (addr == NULL) {
goto label_flush;
}

View file

@ -683,6 +683,48 @@ TEST_BEGIN(test_bin_stats_nrequests_add) {
}
TEST_END
/*
 * Test bin_current_slab_addr returns slabcur first, then falls back to the
 * first nonfull slab, and NULL when both are empty.
 */
TEST_BEGIN(test_bin_current_slab_addr) {
	tsdn_t *tsdn = tsdn_fetch();
	bin_t bin;
	szind_t binind = 0;
	edata_t slab1, slab2;

	/* bin_init gives us an empty bin (no slabcur, empty nonfull heap). */
	bin_init(&bin);

	/* Empty bin: returns NULL. */
	expect_ptr_null(bin_current_slab_addr(tsdn, &bin),
	    "Empty bin should return NULL");

	/*
	 * Build two mock slabs for size class binind.  NOTE(review): the
	 * third create_mock_slab argument is presumably a distinguishing
	 * index/seed so the two slabs get distinct backing addresses —
	 * confirm against the helper's definition.
	 */
	create_mock_slab(&slab1, binind, 0);
	create_mock_slab(&slab2, binind, 1);

	/* Only nonfull set: returns first-of-nonfull addr. */
	bin_slabs_nonfull_insert(&bin, &slab1);
	expect_ptr_eq(bin_current_slab_addr(tsdn, &bin),
	    edata_addr_get(&slab1),
	    "Should return nonfull-first addr when slabcur is NULL");

	/* slabcur takes precedence over nonfull. */
	bin.slabcur = &slab2;
	expect_ptr_eq(bin_current_slab_addr(tsdn, &bin),
	    edata_addr_get(&slab2),
	    "Should return slabcur addr when set");

	/* Only slabcur set, no nonfull. */
	bin_slabs_nonfull_remove(&bin, &slab1);
	expect_ptr_eq(bin_current_slab_addr(tsdn, &bin),
	    edata_addr_get(&slab2),
	    "Should still return slabcur addr after nonfull cleared");

	/*
	 * Release the mock slabs' backing allocations; implies
	 * create_mock_slab obtains them via malloc.  The bin itself goes
	 * out of scope with the test.
	 */
	free(edata_addr_get(&slab1));
	free(edata_addr_get(&slab2));
}
TEST_END
/*
* Test bin_shard_sizes_boot and bin_update_shard_size.
*/
@ -856,6 +898,7 @@ main(void) {
test_bin_lower_slab_inserts_nonfull,
test_bin_dalloc_slab_prepare,
test_bin_stats_nrequests_add,
test_bin_current_slab_addr,
test_bin_shard_sizes,
test_bin_alloc_free_cycle,
test_bin_multi_size_class);