Introduce getters for page allocator shard stats

Access nactive, ndirty and nmuzzy through getters, not directly.
There is no functional change, but getters are required to propagate the
HPA's statistics up to the Page Allocator's statistics.
This commit is contained in:
Dmitry Ilvokhin 2024-03-25 08:02:49 -07:00 committed by Qi Wang
parent 92aa52c062
commit b2e59a96e1
3 changed files with 25 additions and 6 deletions

View file

@ -224,6 +224,10 @@ void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);
/* Number of active pages in the shard (relaxed atomic read). */
size_t pa_shard_nactive(pa_shard_t *shard);
/* Number of pages in the shard's dirty ecache. */
size_t pa_shard_ndirty(pa_shard_t *shard);
/* Number of pages in the shard's muzzy ecache. */
size_t pa_shard_nmuzzy(pa_shard_t *shard);
/* Accumulate the shard's nactive/ndirty/nmuzzy counts into the out-params. */
void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
size_t *ndirty, size_t *nmuzzy);

View file

@ -11,7 +11,7 @@ pa_nactive_add(pa_shard_t *shard, size_t add_pages) {
/* Decrease the shard's active-page count by sub_pages. */
static void
pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) {
/*
 * NOTE(review): the two asserts below are redundant with each other --
 * pa_shard_nactive() is a relaxed load of shard->nactive. This looks like
 * a rendered diff that lost its +/- markers (old line kept alongside the
 * new getter-based line); only one assert should remain in the file.
 */
assert(atomic_load_zu(&shard->nactive, ATOMIC_RELAXED) >= sub_pages);
assert(pa_shard_nactive(shard) >= sub_pages);
atomic_fetch_sub_zu(&shard->nactive, sub_pages, ATOMIC_RELAXED);
}

View file

@ -74,12 +74,27 @@ pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
}
}
/* Return the shard's active-page count (relaxed atomic load). */
size_t
pa_shard_nactive(pa_shard_t *shard) {
	size_t n_active = atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
	return n_active;
}
/* Return the number of pages currently in the shard's dirty ecache. */
size_t
pa_shard_ndirty(pa_shard_t *shard) {
	size_t n_dirty = ecache_npages_get(&shard->pac.ecache_dirty);
	return n_dirty;
}
/* Return the number of pages currently in the shard's muzzy ecache. */
size_t
pa_shard_nmuzzy(pa_shard_t *shard) {
	size_t n_muzzy = ecache_npages_get(&shard->pac.ecache_muzzy);
	return n_muzzy;
}
/*
 * Accumulate this shard's nactive/ndirty/nmuzzy page counts into the
 * caller-supplied totals.
 */
void
pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,
size_t *nmuzzy) {
/*
 * NOTE(review): each statistic is added twice below. This appears to be a
 * rendered diff that lost its +/- markers: the first three lines are the
 * old direct-access version, the last three the new getter-based version.
 * As written, every count would be double-merged; only the getter-based
 * lines should remain in the file.
 */
*nactive += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
*ndirty += ecache_npages_get(&shard->pac.ecache_dirty);
*nmuzzy += ecache_npages_get(&shard->pac.ecache_muzzy);
*nactive += pa_shard_nactive(shard);
*ndirty += pa_shard_ndirty(shard);
*nmuzzy += pa_shard_nmuzzy(shard);
}
void
@ -95,8 +110,8 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
&shard->edata_cache.count, ATOMIC_RELAXED);
size_t resident_pgs = 0;
resident_pgs += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
resident_pgs += ecache_npages_get(&shard->pac.ecache_dirty);
resident_pgs += pa_shard_nactive(shard);
resident_pgs += pa_shard_ndirty(shard);
*resident += (resident_pgs << LG_PAGE);
/* Dirty decay stats */