mirror of
https://github.com/jemalloc/jemalloc.git
synced 2026-04-14 22:51:50 +03:00
Converting size to usize has historically been done in jemalloc by ceiling size to the closest size class. However, this causes a lot of memory waste with HPA enabled. This commit changes how usize is calculated so that the gap between two contiguous usizes is no larger than a page. Specifically, this commit includes the following changes: 1. Adding a build-time config option (--enable-limit-usize-gap) and a runtime one (limit_usize_gap) to guard the changes. When the build-time config is enabled, some minor CPU overhead is expected because usize will be stored and accessed apart from index. When the runtime option is also enabled (it can only be enabled with the build-time config enabled), a new usize calculation approach will be employed. This new calculation will ceil size to the closest multiple of PAGE for all sizes larger than USIZE_GROW_SLOW_THRESHOLD instead of using the size classes. Note when the build-time config is enabled, the runtime option is default on. 2. Prepare tcache for size to grow by PAGE over GROUP*PAGE. To prepare for the upcoming changes where size class grows by PAGE when larger than NGROUP * PAGE, disable the tcache when it is larger than 2 * NGROUP * PAGE. The threshold for tcache is set higher to prevent perf regression as much as possible while usizes between NGROUP * PAGE and 2 * NGROUP * PAGE happen to grow by PAGE. 3. Prepare pac and hpa psset for size to grow by PAGE over GROUP*PAGE. For PAC, to avoid having too many bins, arena bins still have the same layout. This means some extra search is needed for a page-level request that is not aligned with the original size class: it should also search the heap before the current index since the previous heap might also be able to have some allocations satisfying it. The same changes apply to HPA's psset. This search relies on the enumeration of the heap because not all allocs in the previous heap are guaranteed to satisfy the request. 
To balance the memory and CPU overhead, we currently enumerate at most a fixed number of nodes before concluding none can satisfy the request during an enumeration. 4. Add bytes counters to arena large stats. To prepare for the upcoming usize changes, stats collected by multiplying alive allocations and the bin size are no longer accurate. Thus, add separate counters to record the bytes malloced and dalloced. 5. Change structs used when freeing to avoid using index2size for large sizes. - Change the definition of emap_alloc_ctx_t. - Change the read of both from edata_t. - Change the assignment and usage of emap_alloc_ctx_t. - Change other callsites of index2size. Note the changes in the data structure, i.e., emap_alloc_ctx_t, will be used when the build-time config (--enable-limit-usize-gap) is enabled, but they will store the same value as index2size(szind) if the runtime option (opt_limit_usize_gap) is not enabled. 6. Adapt hpa to the usize changes. Change the settings in sec to limit its usage for sizes larger than USIZE_GROW_SLOW_THRESHOLD and modify corresponding tests. 7. Modify usize calculation and corresponding tests. Change the sz_s2u_compute. Note sz_index2size is not always safe now while sz_size2index still works as expected.
449 lines
15 KiB
C
449 lines
15 KiB
C
#include "test/jemalloc_test.h"
|
|
|
|
/*
 * Two-level stringification: STRINGIFY(x) expands its argument before
 * applying the # operator (needed below to splice MALLCTL_ARENAS_ALL
 * into mallctl name strings).
 */
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
|
|
|
|
TEST_BEGIN(test_stats_summary) {
|
|
size_t sz, allocated, active, resident, mapped,
|
|
metadata, metadata_edata, metadata_rtree;
|
|
int expected = config_stats ? 0 : ENOENT;
|
|
|
|
sz = sizeof(size_t);
|
|
expect_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
|
|
0), expected, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
|
|
expected, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
|
|
expected, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
|
|
expected, "Unexpected mallctl() result");
|
|
|
|
expect_d_eq(mallctl("stats.metadata", (void *)&metadata, &sz, NULL, 0),
|
|
expected, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.metadata_edata", (void *)&metadata_edata,
|
|
&sz, NULL, 0), expected, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.metadata_rtree", (void *)&metadata_rtree,
|
|
&sz, NULL, 0), expected, "Unexpected mallctl() result");
|
|
|
|
if (config_stats) {
|
|
expect_zu_le(allocated, active,
|
|
"allocated should be no larger than active");
|
|
expect_zu_lt(active, resident,
|
|
"active should be less than resident");
|
|
expect_zu_lt(active, mapped,
|
|
"active should be less than mapped");
|
|
expect_zu_le(metadata_edata + metadata_rtree, metadata,
|
|
"the sum of metadata_edata and metadata_rtree "
|
|
"should be no larger than metadata");
|
|
}
|
|
}
|
|
TEST_END
|
|
|
|
TEST_BEGIN(test_stats_large) {
|
|
void *p;
|
|
uint64_t epoch;
|
|
size_t allocated;
|
|
uint64_t nmalloc, ndalloc, nrequests;
|
|
size_t sz;
|
|
int expected = config_stats ? 0 : ENOENT;
|
|
|
|
p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0));
|
|
expect_ptr_not_null(p, "Unexpected mallocx() failure");
|
|
|
|
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
|
|
0, "Unexpected mallctl() failure");
|
|
|
|
sz = sizeof(size_t);
|
|
expect_d_eq(mallctl("stats.arenas.0.large.allocated",
|
|
(void *)&allocated, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
sz = sizeof(uint64_t);
|
|
expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
|
|
&sz, NULL, 0), expected, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
|
|
&sz, NULL, 0), expected, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.large.nrequests",
|
|
(void *)&nrequests, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
|
|
if (config_stats) {
|
|
expect_zu_gt(allocated, 0,
|
|
"allocated should be greater than zero");
|
|
expect_u64_ge(nmalloc, ndalloc,
|
|
"nmalloc should be at least as large as ndalloc");
|
|
expect_u64_le(nmalloc, nrequests,
|
|
"nmalloc should no larger than nrequests");
|
|
}
|
|
|
|
dallocx(p, 0);
|
|
}
|
|
TEST_END
|
|
|
|
TEST_BEGIN(test_stats_arenas_summary) {
|
|
void *little, *large;
|
|
uint64_t epoch;
|
|
size_t sz;
|
|
int expected = config_stats ? 0 : ENOENT;
|
|
size_t mapped;
|
|
uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
|
|
uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
|
|
|
|
little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
|
|
expect_ptr_not_null(little, "Unexpected mallocx() failure");
|
|
large = mallocx((1U << SC_LG_LARGE_MINCLASS),
|
|
MALLOCX_ARENA(0));
|
|
expect_ptr_not_null(large, "Unexpected mallocx() failure");
|
|
|
|
dallocx(little, 0);
|
|
dallocx(large, 0);
|
|
|
|
expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
|
|
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
|
|
"Unexpected mallctl() failure");
|
|
|
|
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
|
|
0, "Unexpected mallctl() failure");
|
|
|
|
sz = sizeof(size_t);
|
|
expect_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
|
|
0), expected, "Unexepected mallctl() result");
|
|
|
|
sz = sizeof(uint64_t);
|
|
expect_d_eq(mallctl("stats.arenas.0.dirty_npurge",
|
|
(void *)&dirty_npurge, &sz, NULL, 0), expected,
|
|
"Unexepected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.dirty_nmadvise",
|
|
(void *)&dirty_nmadvise, &sz, NULL, 0), expected,
|
|
"Unexepected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.dirty_purged",
|
|
(void *)&dirty_purged, &sz, NULL, 0), expected,
|
|
"Unexepected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.muzzy_npurge",
|
|
(void *)&muzzy_npurge, &sz, NULL, 0), expected,
|
|
"Unexepected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise",
|
|
(void *)&muzzy_nmadvise, &sz, NULL, 0), expected,
|
|
"Unexepected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.muzzy_purged",
|
|
(void *)&muzzy_purged, &sz, NULL, 0), expected,
|
|
"Unexepected mallctl() result");
|
|
|
|
if (config_stats) {
|
|
if (!is_background_thread_enabled() && !opt_hpa) {
|
|
expect_u64_gt(dirty_npurge + muzzy_npurge, 0,
|
|
"At least one purge should have occurred");
|
|
}
|
|
expect_u64_le(dirty_nmadvise, dirty_purged,
|
|
"dirty_nmadvise should be no greater than dirty_purged");
|
|
expect_u64_le(muzzy_nmadvise, muzzy_purged,
|
|
"muzzy_nmadvise should be no greater than muzzy_purged");
|
|
}
|
|
}
|
|
TEST_END
|
|
|
|
/*
 * Minimal thread entry point: ignores its argument and returns
 * immediately with NULL.
 */
void *
thd_start(void *arg) {
    (void)arg; /* Unused. */
    return NULL;
}
|
|
|
|
static void
|
|
no_lazy_lock(void) {
|
|
thd_t thd;
|
|
|
|
thd_create(&thd, thd_start, NULL);
|
|
thd_join(thd, NULL);
|
|
}
|
|
|
|
TEST_BEGIN(test_stats_arenas_small) {
|
|
void *p;
|
|
size_t sz, allocated;
|
|
uint64_t epoch, nmalloc, ndalloc, nrequests;
|
|
int expected = config_stats ? 0 : ENOENT;
|
|
|
|
no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
|
|
|
|
p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
|
|
expect_ptr_not_null(p, "Unexpected mallocx() failure");
|
|
|
|
expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
|
|
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
|
|
|
|
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
|
|
0, "Unexpected mallctl() failure");
|
|
|
|
sz = sizeof(size_t);
|
|
expect_d_eq(mallctl("stats.arenas.0.small.allocated",
|
|
(void *)&allocated, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
sz = sizeof(uint64_t);
|
|
expect_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
|
|
&sz, NULL, 0), expected, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
|
|
&sz, NULL, 0), expected, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.small.nrequests",
|
|
(void *)&nrequests, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
|
|
if (config_stats) {
|
|
expect_zu_gt(allocated, 0,
|
|
"allocated should be greater than zero");
|
|
expect_u64_gt(nmalloc, 0,
|
|
"nmalloc should be no greater than zero");
|
|
expect_u64_ge(nmalloc, ndalloc,
|
|
"nmalloc should be at least as large as ndalloc");
|
|
expect_u64_gt(nrequests, 0,
|
|
"nrequests should be greater than zero");
|
|
}
|
|
|
|
dallocx(p, 0);
|
|
}
|
|
TEST_END
|
|
|
|
TEST_BEGIN(test_stats_arenas_large) {
|
|
void *p;
|
|
size_t sz, allocated, allocated_before;
|
|
uint64_t epoch, nmalloc, ndalloc;
|
|
size_t malloc_size = (1U << (SC_LG_LARGE_MINCLASS + 1)) + 1;
|
|
int expected = config_stats ? 0 : ENOENT;
|
|
|
|
sz = sizeof(size_t);
|
|
expect_d_eq(mallctl("stats.arenas.0.large.allocated",
|
|
(void *)&allocated_before, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
|
|
p = mallocx(malloc_size, MALLOCX_ARENA(0));
|
|
expect_ptr_not_null(p, "Unexpected mallocx() failure");
|
|
|
|
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
|
|
0, "Unexpected mallctl() failure");
|
|
|
|
expect_d_eq(mallctl("stats.arenas.0.large.allocated",
|
|
(void *)&allocated, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
sz = sizeof(uint64_t);
|
|
expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
|
|
&sz, NULL, 0), expected, "Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
|
|
&sz, NULL, 0), expected, "Unexpected mallctl() result");
|
|
|
|
if (config_stats) {
|
|
expect_zu_ge(allocated_before, 0,
|
|
"allocated should be greater than zero");
|
|
expect_zu_ge(allocated - allocated_before, sz_s2u(malloc_size),
|
|
"the diff between allocated should be greater than the allocation made");
|
|
expect_u64_gt(nmalloc, 0,
|
|
"nmalloc should be greater than zero");
|
|
expect_u64_ge(nmalloc, ndalloc,
|
|
"nmalloc should be at least as large as ndalloc");
|
|
}
|
|
|
|
dallocx(p, 0);
|
|
}
|
|
TEST_END
|
|
|
|
/*
 * Render the mallctl path "stats.arenas.<arena_ind>.bins.0.<name>" into cmd.
 * NOTE(review): sprintf performs no bounds checking; callers in this file
 * pass a 128-byte buffer, which is ample for these fixed-form names.
 */
static void
gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) {
    const char *prefix = "stats.arenas.";
    sprintf(cmd, "%s%u.bins.0.%s", prefix, arena_ind, name);
}
|
|
|
|
TEST_BEGIN(test_stats_arenas_bins) {
|
|
void *p;
|
|
size_t sz, curslabs, curregs, nonfull_slabs;
|
|
uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
|
|
uint64_t nslabs, nreslabs;
|
|
int expected = config_stats ? 0 : ENOENT;
|
|
|
|
/* Make sure allocation below isn't satisfied by tcache. */
|
|
expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
|
|
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
|
|
|
|
unsigned arena_ind, old_arena_ind;
|
|
sz = sizeof(unsigned);
|
|
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
|
|
0, "Arena creation failure");
|
|
sz = sizeof(arena_ind);
|
|
expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
|
|
(void *)&arena_ind, sizeof(arena_ind)), 0,
|
|
"Unexpected mallctl() failure");
|
|
|
|
p = malloc(bin_infos[0].reg_size);
|
|
expect_ptr_not_null(p, "Unexpected malloc() failure");
|
|
|
|
expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
|
|
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
|
|
|
|
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
|
|
0, "Unexpected mallctl() failure");
|
|
|
|
char cmd[128];
|
|
sz = sizeof(uint64_t);
|
|
gen_mallctl_str(cmd, "nmalloc", arena_ind);
|
|
expect_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
gen_mallctl_str(cmd, "ndalloc", arena_ind);
|
|
expect_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
gen_mallctl_str(cmd, "nrequests", arena_ind);
|
|
expect_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
sz = sizeof(size_t);
|
|
gen_mallctl_str(cmd, "curregs", arena_ind);
|
|
expect_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
|
|
sz = sizeof(uint64_t);
|
|
gen_mallctl_str(cmd, "nfills", arena_ind);
|
|
expect_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
gen_mallctl_str(cmd, "nflushes", arena_ind);
|
|
expect_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
|
|
gen_mallctl_str(cmd, "nslabs", arena_ind);
|
|
expect_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
gen_mallctl_str(cmd, "nreslabs", arena_ind);
|
|
expect_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
sz = sizeof(size_t);
|
|
gen_mallctl_str(cmd, "curslabs", arena_ind);
|
|
expect_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
gen_mallctl_str(cmd, "nonfull_slabs", arena_ind);
|
|
expect_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
|
|
expected, "Unexpected mallctl() result");
|
|
|
|
if (config_stats) {
|
|
expect_u64_gt(nmalloc, 0,
|
|
"nmalloc should be greater than zero");
|
|
expect_u64_ge(nmalloc, ndalloc,
|
|
"nmalloc should be at least as large as ndalloc");
|
|
expect_u64_gt(nrequests, 0,
|
|
"nrequests should be greater than zero");
|
|
expect_zu_gt(curregs, 0,
|
|
"allocated should be greater than zero");
|
|
if (opt_tcache) {
|
|
expect_u64_gt(nfills, 0,
|
|
"At least one fill should have occurred");
|
|
expect_u64_gt(nflushes, 0,
|
|
"At least one flush should have occurred");
|
|
}
|
|
expect_u64_gt(nslabs, 0,
|
|
"At least one slab should have been allocated");
|
|
expect_zu_gt(curslabs, 0,
|
|
"At least one slab should be currently allocated");
|
|
expect_zu_eq(nonfull_slabs, 0,
|
|
"slabs_nonfull should be empty");
|
|
}
|
|
|
|
dallocx(p, 0);
|
|
}
|
|
TEST_END
|
|
|
|
TEST_BEGIN(test_stats_arenas_lextents) {
|
|
void *p;
|
|
uint64_t epoch, nmalloc, ndalloc;
|
|
size_t curlextents, sz, hsize;
|
|
int expected = config_stats ? 0 : ENOENT;
|
|
|
|
sz = sizeof(size_t);
|
|
expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL,
|
|
0), 0, "Unexpected mallctl() failure");
|
|
|
|
p = mallocx(hsize, MALLOCX_ARENA(0));
|
|
expect_ptr_not_null(p, "Unexpected mallocx() failure");
|
|
|
|
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
|
|
0, "Unexpected mallctl() failure");
|
|
|
|
sz = sizeof(uint64_t);
|
|
expect_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc",
|
|
(void *)&nmalloc, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
expect_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc",
|
|
(void *)&ndalloc, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
sz = sizeof(size_t);
|
|
expect_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents",
|
|
(void *)&curlextents, &sz, NULL, 0), expected,
|
|
"Unexpected mallctl() result");
|
|
|
|
if (config_stats) {
|
|
expect_u64_gt(nmalloc, 0,
|
|
"nmalloc should be greater than zero");
|
|
expect_u64_ge(nmalloc, ndalloc,
|
|
"nmalloc should be at least as large as ndalloc");
|
|
expect_u64_gt(curlextents, 0,
|
|
"At least one extent should be currently allocated");
|
|
}
|
|
|
|
dallocx(p, 0);
|
|
}
|
|
TEST_END
|
|
|
|
static void
|
|
test_tcache_bytes_for_usize(size_t usize) {
|
|
uint64_t epoch;
|
|
size_t tcache_bytes, tcache_stashed_bytes;
|
|
size_t sz = sizeof(tcache_bytes);
|
|
|
|
void *ptr = mallocx(usize, 0);
|
|
|
|
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
|
|
0, "Unexpected mallctl() failure");
|
|
assert_d_eq(mallctl(
|
|
"stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
|
|
&tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
|
|
assert_d_eq(mallctl(
|
|
"stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
|
|
".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
|
|
"Unexpected mallctl failure");
|
|
size_t tcache_bytes_before = tcache_bytes + tcache_stashed_bytes;
|
|
dallocx(ptr, 0);
|
|
|
|
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
|
|
0, "Unexpected mallctl() failure");
|
|
assert_d_eq(mallctl(
|
|
"stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
|
|
&tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
|
|
assert_d_eq(mallctl(
|
|
"stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
|
|
".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
|
|
"Unexpected mallctl failure");
|
|
size_t tcache_bytes_after = tcache_bytes + tcache_stashed_bytes;
|
|
assert_zu_eq(tcache_bytes_after - tcache_bytes_before,
|
|
usize, "Incorrectly attributed a free");
|
|
}
|
|
|
|
TEST_BEGIN(test_stats_tcache_bytes_small) {
|
|
test_skip_if(!config_stats);
|
|
test_skip_if(!opt_tcache);
|
|
test_skip_if(opt_tcache_max < SC_SMALL_MAXCLASS);
|
|
|
|
test_tcache_bytes_for_usize(SC_SMALL_MAXCLASS);
|
|
}
|
|
TEST_END
|
|
|
|
TEST_BEGIN(test_stats_tcache_bytes_large) {
|
|
test_skip_if(!config_stats);
|
|
test_skip_if(!opt_tcache);
|
|
test_skip_if(opt_tcache_max < SC_LARGE_MINCLASS);
|
|
|
|
test_tcache_bytes_for_usize(SC_LARGE_MINCLASS);
|
|
}
|
|
TEST_END
|
|
|
|
int
main(void) {
    /*
     * Run all stats test cases through the no-reentrancy harness; several
     * cases mutate process-wide state (thread.arena, tcache flushes,
     * arena purges), so they are executed in the listed order.
     */
    return test_no_reentrancy(
        test_stats_summary,
        test_stats_large,
        test_stats_arenas_summary,
        test_stats_arenas_small,
        test_stats_arenas_large,
        test_stats_arenas_bins,
        test_stats_arenas_lextents,
        test_stats_tcache_bytes_small,
        test_stats_tcache_bytes_large);
}
|