Add bytes counter to arena large stats.

To prepare for the upcoming usize changes, stats collected by
multiplying the number of alive allocations by the bin size are no longer
accurate. Thus, add separate counters to record the bytes malloced and dalloced.
This commit is contained in:
guangli-dai 2024-03-25 15:53:59 -07:00 committed by Guangli Dai
parent bffe921ba0
commit 96b15d5d43
3 changed files with 41 additions and 8 deletions

View file

@ -14,12 +14,18 @@ JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena.
* Total number of large allocation/deallocation requests served directly
* by the arena.
*/
locked_u64_t nmalloc;
locked_u64_t ndalloc;
/*
* Total large active bytes (allocated - deallocated) served directly
* by the arena.
*/
locked_u64_t active_bytes;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only

View file

@ -145,8 +145,18 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
assert(nmalloc - ndalloc <= SIZE_T_MAX);
size_t curlextents = (size_t)(nmalloc - ndalloc);
lstats[i].curlextents += curlextents;
astats->allocated_large +=
curlextents * sz_index2size(SC_NBINS + i);
if (config_limit_usize_gap) {
uint64_t active_bytes = locked_read_u64(tsdn,
LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].active_bytes);
locked_inc_u64_unsynchronized(
&lstats[i].active_bytes, active_bytes);
astats->allocated_large += active_bytes;
} else {
astats->allocated_large +=
curlextents * sz_index2size(SC_NBINS + i);
}
}
pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
@ -315,6 +325,11 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[hindex].nmalloc, 1);
if (config_limit_usize_gap) {
locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[hindex].active_bytes,
usize);
}
LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
}
@ -338,6 +353,11 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[hindex].ndalloc, 1);
if (config_limit_usize_gap) {
locked_dec_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[hindex].active_bytes,
usize);
}
LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
}

View file

@ -202,17 +202,22 @@ TEST_END
TEST_BEGIN(test_stats_arenas_large) {
void *p;
size_t sz, allocated;
size_t sz, allocated, allocated_before;
uint64_t epoch, nmalloc, ndalloc;
size_t malloc_size = (1U << (SC_LG_LARGE_MINCLASS + 1)) + 1;
int expected = config_stats ? 0 : ENOENT;
p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
sz = sizeof(size_t);
expect_d_eq(mallctl("stats.arenas.0.large.allocated",
(void *)&allocated_before, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
p = mallocx(malloc_size, MALLOCX_ARENA(0));
expect_ptr_not_null(p, "Unexpected mallocx() failure");
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(size_t);
expect_d_eq(mallctl("stats.arenas.0.large.allocated",
(void *)&allocated, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
@ -223,8 +228,10 @@ TEST_BEGIN(test_stats_arenas_large) {
&sz, NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
expect_zu_gt(allocated, 0,
expect_zu_ge(allocated_before, 0,
"allocated should be greater than zero");
expect_zu_ge(allocated - allocated_before, sz_s2u(malloc_size),
"the diff between allocated should be greater than the allocation made");
expect_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
expect_u64_ge(nmalloc, ndalloc,