jemalloc/test/unit/arena_reset.c
guangli-dai c067a55c79 Introducing a new usize calculation policy
jemalloc has historically converted size to usize by ceiling the
size up to the closest size class. However, this wastes a lot of
memory with HPA enabled.  This commit changes how usize is calculated so
that the gap between two contiguous usizes is no larger than a page.
Specifically, this commit includes the following changes:

1. Adding a build-time config option (--enable-limit-usize-gap) and a
runtime one (limit_usize_gap) to guard the changes.
When build-time
config is enabled, some minor CPU overhead is expected because usize
will be stored and accessed apart from index.  When runtime option is
also enabled (it can only be enabled with the build-time config
enabled), a new usize calculation approach will be employed.  This new
calculation will ceil size to the closest multiple of PAGE for all sizes
larger than USIZE_GROW_SLOW_THRESHOLD instead of using the size classes.
Note when the build-time config is enabled, the runtime option is
default on.

2. Prepare tcache for size to grow by PAGE over GROUP*PAGE.
To prepare for the upcoming changes where size class grows by PAGE when
larger than NGROUP * PAGE, disable the tcache when it is larger than 2 *
NGROUP * PAGE. The threshold for tcache is set higher to prevent perf
regression as much as possible while usizes between NGROUP * PAGE and 2 *
NGROUP * PAGE happen to grow by PAGE.

3. Prepare pac and hpa psset for size to grow by PAGE over GROUP*PAGE
For PAC, to avoid having too many bins, arena bins still have the same
layout.  This means some extra search is needed for a page-level request that
is not aligned with the original size class: it should also search the heap
before the current index since the previous heap might also be able to
have some allocations satisfying it.  The same changes apply to HPA's
psset.
This search relies on the enumeration of the heap because not all allocs in
the previous heap are guaranteed to satisfy the request.  To balance the
memory and CPU overhead, we currently enumerate at most a fixed number
of nodes before concluding none can satisfy the request during an
enumeration.

4. Add bytes counter to arena large stats.
To prepare for the upcoming usize changes, stats collected by
multiplying alive allocations and the bin size is no longer accurate.
Thus, add separate counters to record the bytes malloced and dalloced.

5. Change structs use when freeing to avoid using index2size for large sizes.
  - Change the definition of emap_alloc_ctx_t
  - Change the read of both from edata_t.
  - Change the assignment and usage of emap_alloc_ctx_t.
  - Change other callsites of index2size.
Note for the changes in the data structure, i.e., emap_alloc_ctx_t,
will be used when the build-time config (--enable-limit-usize-gap) is
enabled but they will store the same value as index2size(szind) if the
runtime option (opt_limit_usize_gap) is not enabled.

6. Adapt hpa to the usize changes.
Change the settings in sec to limit its usage for sizes larger than
USIZE_GROW_SLOW_THRESHOLD and modify corresponding tests.

7. Modify usize calculation and corresponding tests.
Change the sz_s2u_compute. Note sz_index2size is not always safe now
while sz_size2index still works as expected.
2025-03-06 15:08:13 -08:00

362 lines
9.3 KiB
C

#ifndef ARENA_RESET_PROF_C_
#include "test/jemalloc_test.h"
#endif
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/rtree.h"
#include "test/extent_hooks.h"
/*
 * Query an unsigned count (e.g. number of size classes) from the given
 * mallctl node and return it.
 */
static unsigned
get_nsizes_impl(const char *cmd) {
	unsigned nsizes;
	size_t len = sizeof(nsizes);

	expect_d_eq(mallctl(cmd, (void *)&nsizes, &len, NULL, 0), 0,
	    "Unexpected mallctl(\"%s\", ...) failure", cmd);

	return nsizes;
}
/* Number of small (bin) size classes. */
static unsigned
get_nsmall(void) {
	unsigned nbins = get_nsizes_impl("arenas.nbins");
	return nbins;
}
/* Number of large (extent) size classes. */
static unsigned
get_nlarge(void) {
	unsigned nlextents = get_nsizes_impl("arenas.nlextents");
	return nlextents;
}
/*
 * Look up the size of class index ind through a per-class mallctl node
 * (e.g. "arenas.bin.0.size"); the literal 0 in the node name is replaced
 * by ind via the mib.
 */
static size_t
get_size_impl(const char *cmd, size_t ind) {
	size_t ret;
	size_t mib[4];
	size_t miblen = 4;
	size_t z = sizeof(size_t);

	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
	/* Substitute the requested class index into the mib. */
	mib[2] = ind;
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);

	return ret;
}
/* Usable size of the ind'th small size class. */
static size_t
get_small_size(size_t ind) {
	size_t sz = get_size_impl("arenas.bin.0.size", ind);
	return sz;
}
/* Usable size of the ind'th large size class. */
static size_t
get_large_size(size_t ind) {
	size_t sz = get_size_impl("arenas.lextent.0.size", ind);
	return sz;
}
/*
 * Like ivsalloc(), but safe to call on discarded allocations: returns 0
 * instead of misbehaving when ptr no longer refers to a live allocation.
 */
static size_t
vsalloc(tsdn_t *tsdn, const void *ptr) {
	emap_full_alloc_ctx_t ctx;

	if (emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global, ptr,
	    &ctx)) {
		/* No emap entry at all for this address. */
		return 0;
	}
	if (ctx.edata == NULL
	    || edata_state_get(ctx.edata) != extent_state_active
	    || ctx.szind == SC_NSIZES) {
		/* Mapped, but not a live, sized allocation. */
		return 0;
	}
	/*
	 * With usize-gap limiting enabled, the size index alone cannot
	 * reconstruct usize, so read it directly from the edata.
	 */
	return config_limit_usize_gap ? edata_usize_get(ctx.edata)
	    : sz_index2size(ctx.szind);
}
/*
 * Create a fresh arena, optionally installing custom extent hooks h, and
 * return its index.
 */
static unsigned
do_arena_create(extent_hooks_t *h) {
	unsigned ind;
	size_t ind_sz = sizeof(ind);
	void *newp = (h != NULL) ? (void *)&h : NULL;
	size_t newlen = (h != NULL) ? sizeof(h) : 0;

	expect_d_eq(mallctl("arenas.create", (void *)&ind, &ind_sz, newp,
	    newlen), 0, "Unexpected mallctl() failure");

	return ind;
}
/*
 * Populate *ptrs with one allocation per small size class plus up to NLARGE
 * large classes, all from arena_ind with tcache bypassed, and set *nptrs to
 * the count.  Verifies each allocation is live and queryable.
 */
static void
do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {
#define NLARGE 32
	unsigned nsmall, nlarge, i;
	size_t sz;
	int flags;
	tsdn_t *tsdn;

	/* Bypass tcache so allocations come straight from the arena. */
	flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;

	nsmall = get_nsmall();
	/* Cap the large classes exercised to keep the test fast and small. */
	nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge();
	*nptrs = nsmall + nlarge;
	*ptrs = (void **)malloc(*nptrs * sizeof(void *));
	expect_ptr_not_null(*ptrs, "Unexpected malloc() failure");

	/* Allocate objects with a wide range of sizes. */
	for (i = 0; i < nsmall; i++) {
		sz = get_small_size(i);
		(*ptrs)[i] = mallocx(sz, flags);
		expect_ptr_not_null((*ptrs)[i],
		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
	}
	for (i = 0; i < nlarge; i++) {
		sz = get_large_size(i);
		(*ptrs)[nsmall + i] = mallocx(sz, flags);
		/*
		 * Bug fix: check the pointer just allocated at
		 * [nsmall + i]; the old code re-checked (*ptrs)[i], so a
		 * failed large allocation went undetected.
		 */
		expect_ptr_not_null((*ptrs)[nsmall + i],
		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
	}

	tsdn = tsdn_fetch();

	/* Verify allocations. */
	for (i = 0; i < *nptrs; i++) {
		expect_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
		    "Allocation should have queryable size");
	}
}
static void
do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) {
tsdn_t *tsdn;
unsigned i;
tsdn = tsdn_fetch();
if (have_background_thread) {
malloc_mutex_lock(tsdn,
&background_thread_info_get(arena_ind)->mtx);
}
/* Verify allocations no longer exist. */
for (i = 0; i < nptrs; i++) {
expect_zu_eq(vsalloc(tsdn, ptrs[i]), 0,
"Allocation should no longer exist");
}
if (have_background_thread) {
malloc_mutex_unlock(tsdn,
&background_thread_info_get(arena_ind)->mtx);
}
free(ptrs);
}
/*
 * Invoke the per-arena mallctl node name (arena.0.reset or
 * arena.0.destroy) against arena_ind by patching the index into the mib.
 */
static void
do_arena_reset_destroy(const char *name, unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(size_t);

	expect_d_eq(mallctlnametomib(name, mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	/* Replace the literal 0 in the node name with the target arena. */
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}
/* Reset arena_ind: discard its allocations but keep the arena usable. */
static void
do_arena_reset(unsigned arena_ind) {
do_arena_reset_destroy("arena.0.reset", arena_ind);
}
/* Destroy arena_ind entirely, allowing its index to be recycled. */
static void
do_arena_destroy(unsigned arena_ind) {
do_arena_reset_destroy("arena.0.destroy", arena_ind);
}
/*
 * arena.<i>.reset must discard every allocation in the arena: allocate
 * across all size classes, reset, then verify none remain live.
 */
TEST_BEGIN(test_arena_reset) {
unsigned arena_ind;
void **ptrs;
unsigned nptrs;
arena_ind = do_arena_create(NULL);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
do_arena_reset(arena_ind);
do_arena_reset_post(ptrs, nptrs, arena_ind);
}
TEST_END
/*
 * Report whether arena.<arena_ind>.initialized is true.  When refresh is
 * set, bump the epoch first so cached stats are brought up to date.
 */
static bool
arena_i_initialized(unsigned arena_ind, bool refresh) {
	bool result;
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t result_sz = sizeof(result);

	if (refresh) {
		uint64_t epoch = 1;
		expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
		    sizeof(epoch)), 0, "Unexpected mallctl() failure");
	}

	expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&result, &result_sz,
	    NULL, 0), 0, "Unexpected mallctlbymib() failure");

	return result;
}
/*
 * Before any arena has been destroyed, the MALLCTL_ARENAS_DESTROYED
 * pseudo-arena must report uninitialized stats.
 */
TEST_BEGIN(test_arena_destroy_initial) {
expect_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should not be initialized");
}
TEST_END
/*
 * Exercise arena destruction with default extent hooks: stats lifecycle,
 * arena-index recycling after destroy, and arenas.create with a wrapped
 * copy of the default hooks.
 */
TEST_BEGIN(test_arena_destroy_hooks_default) {
unsigned arena_ind, arena_ind_another, arena_ind_prev;
void **ptrs;
unsigned nptrs;
arena_ind = do_arena_create(NULL);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
/* Stats are lazily initialized: false before refresh, true after. */
expect_false(arena_i_initialized(arena_ind, false),
"Arena stats should not be initialized");
expect_true(arena_i_initialized(arena_ind, true),
"Arena stats should be initialized");
/*
 * Create another arena before destroying one, to better verify arena
 * index reuse.
 */
arena_ind_another = do_arena_create(NULL);
do_arena_destroy(arena_ind);
expect_false(arena_i_initialized(arena_ind, true),
"Arena stats should not be initialized");
expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should be initialized");
do_arena_reset_post(ptrs, nptrs, arena_ind);
/* The destroyed arena's index should be handed out again. */
arena_ind_prev = arena_ind;
arena_ind = do_arena_create(NULL);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
expect_u_eq(arena_ind, arena_ind_prev,
"Arena index should have been recycled");
do_arena_destroy(arena_ind);
do_arena_reset_post(ptrs, nptrs, arena_ind);
do_arena_destroy(arena_ind_another);
/* Try arena.create with custom hooks. */
size_t sz = sizeof(extent_hooks_t *);
extent_hooks_t *a0_default_hooks;
expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&a0_default_hooks,
&sz, NULL, 0), 0, "Unexpected mallctlnametomib() failure");
/* Default impl; but wrapped as "customized". */
extent_hooks_t new_hooks = *a0_default_hooks;
extent_hooks_t *hook = &new_hooks;
sz = sizeof(unsigned);
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
(void *)&hook, sizeof(void *)), 0,
"Unexpected mallctl() failure");
do_arena_destroy(arena_ind);
}
TEST_END
/*
* Actually unmap extents, regardless of opt_retain, so that attempts to access
* a destroyed arena's memory will segfault.
*/
/*
 * dalloc hook that really unmaps the extent (when permitted), recording
 * that it was called and whether it acted.  Returns false on success per
 * the extent-hook contract, true to decline.
 */
static bool
extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
	    "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
	    "true" : "false", arena_ind);
	expect_ptr_eq(extent_hooks, &hooks,
	    "extent_hooks should be same as pointer used to set hooks");
	expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
	    "Wrong hook function");

	called_dalloc = true;
	if (!try_dalloc) {
		/* Test asked us to decline this deallocation. */
		return true;
	}
	did_dalloc = true;

	/*
	 * Only unmap when it is safe: either mappings coalesce, or retain
	 * is off (equivalent, by De Morgan, to the original
	 * !(!maps_coalesce && opt_retain) guard).
	 */
	if (maps_coalesce || !opt_retain) {
		pages_unmap(addr, size);
		return false;
	}
	return true;
}
/* Saved copy of the hook table in effect before this test overrides it. */
static extent_hooks_t hooks_orig;
/* Wrapped default hooks, except dalloc genuinely unmaps the extent. */
static extent_hooks_t hooks_unmap = {
extent_alloc_hook, /* alloc */
extent_dalloc_unmap, /* dalloc */
extent_destroy_hook, /* destroy */
extent_commit_hook, /* commit */
extent_decommit_hook, /* decommit */
extent_purge_lazy_hook, /* purge_lazy */
extent_purge_forced_hook, /* purge_forced */
extent_split_hook, /* split */
extent_merge_hook /* merge */
};
/*
 * Exercise arena destruction with hooks that truly unmap extents, and
 * verify the alloc/dalloc hooks fire and stats transition as expected.
 * Restores the original hooks before returning.
 */
TEST_BEGIN(test_arena_destroy_hooks_unmap) {
unsigned arena_ind;
void **ptrs;
unsigned nptrs;
extent_hooks_prep();
if (maps_coalesce) {
try_decommit = false;
}
/* Swap in the unmapping hook table; hooks_orig saves the old one. */
memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t));
did_alloc = false;
arena_ind = do_arena_create(&hooks);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
expect_true(did_alloc, "Expected alloc");
expect_false(arena_i_initialized(arena_ind, false),
"Arena stats should not be initialized");
expect_true(arena_i_initialized(arena_ind, true),
"Arena stats should be initialized");
did_dalloc = false;
do_arena_destroy(arena_ind);
/* Destroying the arena must have routed through the dalloc hook. */
expect_true(did_dalloc, "Expected dalloc");
expect_false(arena_i_initialized(arena_ind, true),
"Arena stats should not be initialized");
expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should be initialized");
do_arena_reset_post(ptrs, nptrs, arena_ind);
/* Restore the hooks saved at the top of the test. */
memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
}
TEST_END
/* Run the arena reset/destroy test suite. */
int
main(void) {
return test(
test_arena_reset,
test_arena_destroy_initial,
test_arena_destroy_hooks_default,
test_arena_destroy_hooks_unmap);
}