Encapsulate cache_bin_array_descriptor queue ops behind arena helpers

tcache.c was reaching into arena->cache_bin_array_descriptor_ql{,_mtx}
directly to register / unregister / postfork-relink its descriptor.
That queue and its mutex are owned by the arena, so the locking and
ql_* operations belong in arena.c.
Slobodan Predolac 2026-05-08 10:10:04 -07:00
parent 36820f9b76
commit 35d102fa32
3 changed files with 74 additions and 38 deletions
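
In caller terms, the change collapses tcache.c's open-coded lock-and-splice
sequence into a single arena call. A minimal before/after sketch, using only
identifiers that appear in the diff below (illustrative fragments, not a
complete function):

    /* Before: tcache.c (via tcache_arena_link) locked the arena's mutex
     * and spliced the arena-owned queue itself. */
    malloc_mutex_lock(tsdn, &arena->cache_bin_array_descriptor_ql_mtx);
    ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
        &tcache_slow->cache_bin_array_descriptor, link);
    malloc_mutex_unlock(tsdn, &arena->cache_bin_array_descriptor_ql_mtx);

    /* After: the queue and its mutex stay private to arena.c. */
    arena_cache_bin_array_register(tsdn, arena,
        &tcache_slow->cache_bin_array_descriptor);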


@@ -112,6 +112,12 @@ size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    void **ptrs, size_t nfill, bool zero);
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
void *arena_locality_hint(tsdn_t *tsdn, arena_t *arena, szind_t szind);
void arena_cache_bin_array_register(tsdn_t *tsdn, arena_t *arena,
    cache_bin_array_descriptor_t *desc);
void arena_cache_bin_array_unregister(tsdn_t *tsdn, arena_t *arena,
    cache_bin_array_descriptor_t *desc);
void arena_cache_bin_array_postfork_child(arena_t *arena,
    cache_bin_array_descriptor_t *desc_or_null);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);


@@ -219,6 +219,59 @@ arena_locality_hint(tsdn_t *tsdn, arena_t *arena, szind_t szind) {
    return bin_current_slab_addr(tsdn, bin);
}

void
arena_cache_bin_array_register(tsdn_t *tsdn, arena_t *arena,
    cache_bin_array_descriptor_t *desc) {
    cassert(config_stats);
    malloc_mutex_lock(tsdn, &arena->cache_bin_array_descriptor_ql_mtx);
    ql_tail_insert(&arena->cache_bin_array_descriptor_ql, desc, link);
    malloc_mutex_unlock(tsdn, &arena->cache_bin_array_descriptor_ql_mtx);
}

void
arena_cache_bin_array_unregister(tsdn_t *tsdn, arena_t *arena,
    cache_bin_array_descriptor_t *desc) {
    cassert(config_stats);
    malloc_mutex_lock(tsdn, &arena->cache_bin_array_descriptor_ql_mtx);
    if (config_debug) {
        bool in_ql = false;
        cache_bin_array_descriptor_t *iter;
        ql_foreach (iter, &arena->cache_bin_array_descriptor_ql, link) {
            if (iter == desc) {
                in_ql = true;
                break;
            }
        }
        assert(in_ql);
    }
    ql_remove(&arena->cache_bin_array_descriptor_ql, desc, link);
    /*
     * Flush this descriptor's per-cache_bin request counts up to the
     * arena's bin/large stats before the owner forgets which arena
     * accumulated them. (Step 2 of this refactor will relocate
     * tcache_stats_merge into cache_bin.c.)
     */
    tcache_stats_merge(tsdn, desc, arena);
    malloc_mutex_unlock(tsdn, &arena->cache_bin_array_descriptor_ql_mtx);
}

/*
 * Postfork-child entry: child is single-threaded and the queue is rebuilt
 * from scratch (descriptors held by other threads at fork time are gone).
 * The mutex itself is reinitialized later in arena_postfork_child, so we
 * cannot lock here.
 */
void
arena_cache_bin_array_postfork_child(arena_t *arena,
    cache_bin_array_descriptor_t *desc_or_null) {
    cassert(config_stats);
    ql_new(&arena->cache_bin_array_descriptor_ql);
    if (desc_or_null != NULL) {
        ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
            desc_or_null, link);
    }
}

static void
arena_background_thread_inactivity_check(
    tsdn_t *tsdn, arena_t *arena, bool is_background_thread) {


@@ -720,30 +720,19 @@ tcache_bin_ncached_max_read(
    return false;
}

/*
 * Caller must hold arena->cache_bin_array_descriptor_ql_mtx, OR be in the
 * postfork-child path (single-threaded, mutex re-init pending).
 */
static void
tcache_arena_link(arena_t *arena, tcache_slow_t *tcache_slow) {
    assert(tcache_slow->tcache != NULL);
    cache_bin_array_descriptor_init(
        &tcache_slow->cache_bin_array_descriptor,
        tcache_slow->tcache->bins);
    ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
        &tcache_slow->cache_bin_array_descriptor, link);
}

void
tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
    arena_t *arena) {
    assert(tcache_slow->arena == NULL);
    assert(tcache_slow->tcache != NULL);
    tcache_slow->arena = arena;
    if (config_stats) {
        malloc_mutex_lock(tsdn, &arena->cache_bin_array_descriptor_ql_mtx);
        tcache_arena_link(arena, tcache_slow);
        malloc_mutex_unlock(tsdn, &arena->cache_bin_array_descriptor_ql_mtx);
        cache_bin_array_descriptor_init(
            &tcache_slow->cache_bin_array_descriptor,
            tcache_slow->tcache->bins);
        arena_cache_bin_array_register(tsdn, arena,
            &tcache_slow->cache_bin_array_descriptor);
    }
}
@@ -752,25 +741,8 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow) {
    arena_t *arena = tcache_slow->arena;
    assert(arena != NULL);
    if (config_stats) {
        malloc_mutex_lock(tsdn, &arena->cache_bin_array_descriptor_ql_mtx);
        if (config_debug) {
            bool in_ql = false;
            cache_bin_array_descriptor_t *iter;
            ql_foreach (iter,
                &arena->cache_bin_array_descriptor_ql, link) {
                if (iter ==
                    &tcache_slow->cache_bin_array_descriptor) {
                    in_ql = true;
                    break;
                }
            }
            assert(in_ql);
        }
        ql_remove(&arena->cache_bin_array_descriptor_ql,
            &tcache_slow->cache_bin_array_descriptor, link);
        tcache_stats_merge(tsdn,
            &tcache_slow->cache_bin_array_descriptor, arena);
        malloc_mutex_unlock(tsdn, &arena->cache_bin_array_descriptor_ql_mtx);
        arena_cache_bin_array_unregister(tsdn, arena,
            &tcache_slow->cache_bin_array_descriptor);
    }
    tcache_slow->arena = NULL;
}
@@ -787,11 +759,16 @@ tcache_arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
    if (!config_stats) {
        return;
    }
    ql_new(&arena->cache_bin_array_descriptor_ql);
    cache_bin_array_descriptor_t *desc = NULL;
    tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn));
    if (tcache_slow != NULL && tcache_slow->arena == arena) {
        tcache_arena_link(arena, tcache_slow);
        assert(tcache_slow->tcache != NULL);
        cache_bin_array_descriptor_init(
            &tcache_slow->cache_bin_array_descriptor,
            tcache_slow->tcache->bins);
        desc = &tcache_slow->cache_bin_array_descriptor;
    }
    arena_cache_bin_array_postfork_child(arena, desc);
}

static void
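
The ownership rule the commit enforces generalizes beyond jemalloc: the module
that owns a list and the mutex guarding it should also own the only functions
that touch them. A minimal standalone sketch of that pattern in plain C with
pthreads (illustrative only; registry, desc, and every other name below are
invented for this sketch, none of it is taken from this commit):

    #include <pthread.h>
    #include <stddef.h>

    struct desc {
        struct desc *next;
    };

    /* Owns both the list head and the mutex that guards it. */
    struct registry {
        pthread_mutex_t mtx;
        struct desc *head;
    };

    /* Callers never see the mutex or the links; they only call these helpers. */
    void
    registry_register(struct registry *r, struct desc *d) {
        pthread_mutex_lock(&r->mtx);
        d->next = r->head;
        r->head = d;
        pthread_mutex_unlock(&r->mtx);
    }

    void
    registry_unregister(struct registry *r, struct desc *d) {
        pthread_mutex_lock(&r->mtx);
        /* Walk via a pointer-to-pointer so removal needs no prev bookkeeping. */
        for (struct desc **pp = &r->head; *pp != NULL; pp = &(*pp)->next) {
            if (*pp == d) {
                *pp = d->next;
                break;
            }
        }
        pthread_mutex_unlock(&r->mtx);
    }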