From b2e59a96e1ffc953300c5b69ffae934a63de38c0 Mon Sep 17 00:00:00 2001 From: Dmitry Ilvokhin Date: Mon, 25 Mar 2024 08:02:49 -0700 Subject: [PATCH] Introduce getters for page allocator shard stats Access nactive, ndirty and nmuzzy through getters and not directly. There are no functional changes, but getters are required to propagate HPA's statistics up to Page Allocator's statistics. --- include/jemalloc/internal/pa.h | 4 ++++ src/pa.c | 2 +- src/pa_extra.c | 25 ++++++++++++++++++++----- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/include/jemalloc/internal/pa.h b/include/jemalloc/internal/pa.h index 5f43244d..75626738 100644 --- a/include/jemalloc/internal/pa.h +++ b/include/jemalloc/internal/pa.h @@ -224,6 +224,10 @@ void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard); void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard); void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard); +size_t pa_shard_nactive(pa_shard_t *shard); +size_t pa_shard_ndirty(pa_shard_t *shard); +size_t pa_shard_nmuzzy(pa_shard_t *shard); + void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty, size_t *nmuzzy); diff --git a/src/pa.c b/src/pa.c index ebc6861d..7a24ae65 100644 --- a/src/pa.c +++ b/src/pa.c @@ -11,7 +11,7 @@ pa_nactive_add(pa_shard_t *shard, size_t add_pages) { static void pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) { - assert(atomic_load_zu(&shard->nactive, ATOMIC_RELAXED) >= sub_pages); + assert(pa_shard_nactive(shard) >= sub_pages); atomic_fetch_sub_zu(&shard->nactive, sub_pages, ATOMIC_RELAXED); } diff --git a/src/pa_extra.c b/src/pa_extra.c index 0f488be6..ee101891 100644 --- a/src/pa_extra.c +++ b/src/pa_extra.c @@ -74,12 +74,27 @@ pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) { } } +size_t +pa_shard_nactive(pa_shard_t *shard) { + return atomic_load_zu(&shard->nactive, ATOMIC_RELAXED); +} + +size_t +pa_shard_ndirty(pa_shard_t *shard) { + return ecache_npages_get(&shard->pac.ecache_dirty); 
+} + +size_t +pa_shard_nmuzzy(pa_shard_t *shard) { + return ecache_npages_get(&shard->pac.ecache_muzzy); +} + void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty, size_t *nmuzzy) { - *nactive += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED); - *ndirty += ecache_npages_get(&shard->pac.ecache_dirty); - *nmuzzy += ecache_npages_get(&shard->pac.ecache_muzzy); + *nactive += pa_shard_nactive(shard); + *ndirty += pa_shard_ndirty(shard); + *nmuzzy += pa_shard_nmuzzy(shard); } void @@ -95,8 +110,8 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard, &shard->edata_cache.count, ATOMIC_RELAXED); size_t resident_pgs = 0; - resident_pgs += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED); - resident_pgs += ecache_npages_get(&shard->pac.ecache_dirty); + resident_pgs += pa_shard_nactive(shard); + resident_pgs += pa_shard_ndirty(shard); *resident += (resident_pgs << LG_PAGE); /* Dirty decay stats */