Remove hpa_sec_batch_fill_extra and calculate nallocs automatically.

This change includes the following improvements:

- Remove the hpa_sec_batch_fill_extra parameter.
- Refactor hpa_alloc() and its helper functions so they can allocate
  more than one extent out of a single pageslab, amortizing the
  per-pageslab costs (active bitmap iteration, pageslab metadata
  updates) across multiple extents.
- Calculate a min and max number of extents to allocate in
  hpa_alloc(). The code tries to allocate at least min extents and
  up to max extents, as long as the additional ones can come from a
  pageslab it is already allocating out of, since those extra
  allocations are relatively cheap (see the standalone sketch after
  this list).
- Add extent allocation distribution stats.
- Amend hpa_sec_integration.c unit test.
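
As a standalone illustration (not part of the diff), the sketch below mirrors
the new nallocs calculation in sec_calc_nallocs_for_size(), using the SEC_*
constants this commit adds; the jemalloc-internal min_zu()/max_zu() helpers
are inlined so the example compiles on its own.

#include <stddef.h>
#include <stdio.h>

/* Values match the new SEC_* defines in sec.h. */
#define SEC_MIN_NALLOCS 2
#define SEC_MAX_NALLOCS 4
#define SEC_MAX_BYTES_DIV 4

static size_t min_zu(size_t a, size_t b) { return (a < b) ? a : b; }
static size_t max_zu(size_t a, size_t b) { return (a > b) ? a : b; }

/* Sketch of sec_calc_nallocs_for_size() for a size the SEC supports. */
static void
calc_nallocs(size_t max_bytes, size_t size, size_t *min_nallocs,
    size_t *max_nallocs) {
	/* Attempt to fill at most max_bytes / SEC_MAX_BYTES_DIV of the SEC. */
	size_t nallocs = max_bytes / size / SEC_MAX_BYTES_DIV;
	nallocs = max_zu(nallocs, SEC_MIN_NALLOCS);
	*min_nallocs = SEC_MIN_NALLOCS;
	*max_nallocs = min_zu(nallocs, SEC_MAX_NALLOCS);
}

int
main(void) {
	/*
	 * With hpa_sec_max_bytes = 256K and a 16K request this prints
	 * "min=2 max=4": allocate at least 2 extents, and up to 4 as long
	 * as they come from a pageslab we are already allocating out of.
	 */
	size_t min_nallocs, max_nallocs;
	calc_nallocs(256 * 1024, 16 * 1024, &min_nallocs, &max_nallocs);
	printf("min=%zu max=%zu\n", min_nallocs, max_nallocs);
	return 0;
}
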
Tony Printezis 2026-03-02 11:11:09 -08:00 committed by Guangli Dai
parent 639e70fcfb
commit f008ce9fe1
15 changed files with 675 additions and 105 deletions

@ -50,6 +50,60 @@ struct hpa_shard_nonderived_stats_s {
* Guarded by mtx.
*/
uint64_t ndehugifies;
/*
* Distribution of the min number of extents we will try to allocate
* from a single hpa_alloc() call.
*
* Guarded by mtx.
*/
uint64_t hpa_alloc_min_extents[SEC_MAX_NALLOCS + 1];
/*
* Distribution of the max number of extents we will try to allocate
* from a single hpa_alloc() call.
*
* Guarded by mtx.
*/
uint64_t hpa_alloc_max_extents[SEC_MAX_NALLOCS + 1];
/*
* Distribution of the number of extents allocated for a single
* hpa_alloc() call and a single mtx lock acquisition.
*
* Guarded by mtx.
*/
uint64_t hpa_alloc_extents[SEC_MAX_NALLOCS + 1];
/*
* Distribution of the number of ps out of which we allocated extents
* for a single hpa_alloc() call and a single mtx lock acquisition.
*
* Guarded by mtx.
*/
uint64_t hpa_alloc_ps[SEC_MAX_NALLOCS + 1];
/*
* Distribution of the number of pages allocated from a single ps.
*
* Guarded by mtx.
*/
uint64_t hpa_alloc_pages_per_ps[SEC_MAX_NALLOCS + 1];
/*
* Distribution of the number of extents allocated from a single ps.
*
* Guarded by mtx.
*/
uint64_t hpa_alloc_extents_per_ps[SEC_MAX_NALLOCS + 1];
/*
* Distribution of the total elapsed time (ns) for allocating extents
* from a single ps.
*
* Guarded by mtx.
*/
uint64_t hpa_alloc_total_elapsed_ns_per_ps[SEC_MAX_NALLOCS + 1];
};
/* Completely derived; only used by CTL. */

@ -432,6 +432,60 @@ void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age, bool is_huge);
void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz);
void hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz);
/*
* For buffering extent allocations we will perform out of
* a single ps.
*/
typedef struct hpdata_alloc_offset_s hpdata_alloc_offset_t;
struct hpdata_alloc_offset_s {
/*
* Index on the active bitmap for the extent to allocate.
* It is used to know which bits we'll need to set when we perform
* the allocation. They are in the range [index, index + npages).
*/
size_t index;
/*
* The length of the free bit range on the active bitmap,
* starting at index, before setting the bits in the range
* [index, index + npages).
* It is used to determine whether one of the allocations
* used up the longest free range on the active bitmap.
* If it did, we might have to update the longest free range
* metadata on the hpdata.
*/
size_t len_before;
/*
* The length of the longest free range in the range [0, index).
* When we need to update the longest free range on the hpdata,
* the new value is either longest_len (the max up to index),
* len_before - npages (what's left after we carve up the free
* range starting at index), or the max in the range
* [index + len_before, HUGEPAGE_PAGES), whichever is greater.
*/
size_t longest_len;
};
/*
* Given an hpdata that can serve an allocation request of size sz,
* find between one and max_nallocs offsets that can satisfy such
* an allocation request and buffer them in offsets (without actually
* reserving any space or updating hpdata). Return the number
* of offsets discovered.
*/
size_t hpdata_find_alloc_offsets(hpdata_t *hpdata, size_t sz,
hpdata_alloc_offset_t *offsets, size_t max_nallocs);
/* Reserve the allocation for the given offset. */
void *hpdata_reserve_alloc_offset(
hpdata_t *hpdata, size_t sz, hpdata_alloc_offset_t *offset);
/*
* Do any work that needs to be done after performing allocations
* from a single hpdata.
*/
void hpdata_post_reserve_alloc_offsets(hpdata_t *hpdata, size_t sz,
hpdata_alloc_offset_t *offsets, size_t nallocs);
/*
* The hpdata_purge_prepare_t allows grabbing the metadata required to purge
* subranges of a hugepage while holding a lock, drop the lock during the actual
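
To make the scan behind hpdata_find_alloc_offsets() concrete, here is a
standalone sketch (hypothetical names, a plain bool array in place of the
fb_* bitmap helpers, and no longest-free-range bookkeeping) that carves up to
max_nallocs extents of npages pages out of the free runs, front to back:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SLAB_PAGES 512 /* stand-in for HUGEPAGE_PAGES */

typedef struct {
	size_t index;      /* first page of the candidate extent */
	size_t len_before; /* remaining length of the free run it came from */
} alloc_offset_t;

/*
 * Walk the "active" array looking for free (false) runs and carve up to
 * max_nallocs extents of npages pages out of them. Returns the number of
 * offsets recorded.
 */
static size_t
find_alloc_offsets(const bool *active, size_t npages,
    alloc_offset_t *offsets, size_t max_nallocs) {
	size_t nallocs = 0;
	size_t i = 0;
	while (i < SLAB_PAGES && nallocs < max_nallocs) {
		if (active[i]) {
			i++;
			continue;
		}
		/* Measure the free run starting at i. */
		size_t begin = i;
		size_t len = 0;
		while (i < SLAB_PAGES && !active[i]) {
			i++;
			len++;
		}
		/* Carve as many npages-sized extents as fit (and are wanted). */
		while (npages <= len && nallocs < max_nallocs) {
			offsets[nallocs].index = begin;
			offsets[nallocs].len_before = len;
			nallocs++;
			begin += npages;
			len -= npages;
		}
	}
	return nallocs;
}

int
main(void) {
	bool active[SLAB_PAGES] = {false};
	/* Pages [0, 3) and [10, 20) are already allocated. */
	for (size_t i = 0; i < 3; i++) active[i] = true;
	for (size_t i = 10; i < 20; i++) active[i] = true;

	alloc_offset_t offsets[4];
	size_t n = find_alloc_offsets(active, /* npages */ 4, offsets, 4);
	/* Finds extents at pages 3, 20, 24 and 28. */
	for (size_t i = 0; i < n; i++) {
		printf("extent %zu at page %zu (run length %zu)\n", i,
		    offsets[i].index, offsets[i].len_before);
	}
	return 0;
}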

@ -97,6 +97,25 @@ sec_size_supported(sec_t *sec, size_t size) {
return sec_is_used(sec) && size <= sec->opts.max_alloc;
}
/* Min number of extents we will allocate when expanding the SEC. */
#define SEC_MIN_NALLOCS 2
/* Max number of extents we will allocate out of a single huge page. */
#define SEC_MAX_NALLOCS 4
/* Attempt to fill the SEC up to max_bytes / SEC_MAX_BYTES_DIV */
#define SEC_MAX_BYTES_DIV 4
/*
* Calculate the min and max number of extents we will try to allocate
* when expanding the SEC. We will attempt to allocate at least min
* extents and up to max extents depending on whether we can allocate
* them out of a huge page we have already allocated out of. Both
* min and max should be in the range [1, SEC_MAX_NALLOCS].
*/
void sec_calc_nallocs_for_size(
sec_t *sec, size_t size, size_t *min_nallocs, size_t *max_nallocs);
/* If the sec does not have an extent available, it will return NULL. */
edata_t *sec_alloc(tsdn_t *tsdn, sec_t *sec, size_t size);
void sec_fill(tsdn_t *tsdn, sec_t *sec, size_t size,

@ -27,16 +27,9 @@ struct sec_opts_s {
* until we are 1/4 below max_bytes.
*/
size_t max_bytes;
/*
* When we can't satisfy an allocation out of the SEC because there are
* no available ones cached, allocator will allocate a batch with extra
* batch_fill_extra extents of the same size.
*/
size_t batch_fill_extra;
};
#define SEC_OPTS_NSHARDS_DEFAULT 2
#define SEC_OPTS_BATCH_FILL_EXTRA_DEFAULT 3
#define SEC_OPTS_MAX_ALLOC_DEFAULT ((32 * 1024) < PAGE ? PAGE : (32 * 1024))
#define SEC_OPTS_MAX_BYTES_DEFAULT \
((256 * 1024) < (4 * SEC_OPTS_MAX_ALLOC_DEFAULT) \
@ -45,6 +38,6 @@ struct sec_opts_s {
#define SEC_OPTS_DEFAULT \
{SEC_OPTS_NSHARDS_DEFAULT, SEC_OPTS_MAX_ALLOC_DEFAULT, \
SEC_OPTS_MAX_BYTES_DEFAULT, SEC_OPTS_BATCH_FILL_EXTRA_DEFAULT}
SEC_OPTS_MAX_BYTES_DEFAULT}
#endif /* JEMALLOC_INTERNAL_SEC_OPTS_H */

@ -6,6 +6,18 @@
#define UTIL_INLINE static inline
JEMALLOC_ALWAYS_INLINE
size_t
min_zu(size_t a, size_t b) {
return (a < b) ? a : b;
}
JEMALLOC_ALWAYS_INLINE
size_t
max_zu(size_t a, size_t b) {
return (a > b) ? a : b;
}
/* Junk fill patterns. */
#ifndef JEMALLOC_ALLOC_JUNK
# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)

@ -225,7 +225,8 @@ conf_error(
/* However, tolerate experimental features. */
return;
}
const char *deprecated[] = {"hpa_sec_bytes_after_flush"};
const char *deprecated[] = {
"hpa_sec_bytes_after_flush", "hpa_sec_batch_fill_extra"};
const size_t deprecated_cnt = (sizeof(deprecated)
/ sizeof(deprecated[0]));
for (size_t i = 0; i < deprecated_cnt; ++i) {
@ -952,9 +953,6 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_bytes,
"hpa_sec_max_bytes", SEC_OPTS_MAX_BYTES_DEFAULT, 0,
CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true);
CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.batch_fill_extra,
"hpa_sec_batch_fill_extra", 1, HUGEPAGE_PAGES,
CONF_CHECK_MIN, CONF_CHECK_MAX, true);
if (CONF_MATCH("slab_sizes")) {
if (CONF_MATCH_VALUE("default")) {

@ -115,7 +115,6 @@ CTL_PROTO(opt_hpa_dirty_mult)
CTL_PROTO(opt_hpa_sec_nshards)
CTL_PROTO(opt_hpa_sec_max_alloc)
CTL_PROTO(opt_hpa_sec_max_bytes)
CTL_PROTO(opt_hpa_sec_batch_fill_extra)
CTL_PROTO(opt_huge_arena_pac_thp)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
@ -311,6 +310,14 @@ CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)
INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j)
CTL_PROTO(stats_arenas_i_hpa_shard_alloc_j_min_extents)
CTL_PROTO(stats_arenas_i_hpa_shard_alloc_j_max_extents)
CTL_PROTO(stats_arenas_i_hpa_shard_alloc_j_extents)
CTL_PROTO(stats_arenas_i_hpa_shard_alloc_j_ps)
CTL_PROTO(stats_arenas_i_hpa_shard_alloc_j_pages_per_ps)
CTL_PROTO(stats_arenas_i_hpa_shard_alloc_j_extents_per_ps)
CTL_PROTO(stats_arenas_i_hpa_shard_alloc_j_total_elapsed_ns_per_ps)
INDEX_PROTO(stats_arenas_i_hpa_shard_alloc_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
@ -489,7 +496,6 @@ static const ctl_named_node_t opt_node[] = {{NAME("abort"), CTL(opt_abort)},
{NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)},
{NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)},
{NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)},
{NAME("hpa_sec_batch_fill_extra"), CTL(opt_hpa_sec_batch_fill_extra)},
{NAME("huge_arena_pac_thp"), CTL(opt_huge_arena_pac_thp)},
{NAME("metadata_thp"), CTL(opt_metadata_thp)},
{NAME("retain"), CTL(opt_retain)}, {NAME("dss"), CTL(opt_dss)},
@ -782,6 +788,23 @@ static const ctl_named_node_t
static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] =
{{INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)}};
static const ctl_named_node_t stats_arenas_i_hpa_shard_alloc_j_node[] = {
{NAME("min_extents"), CTL(stats_arenas_i_hpa_shard_alloc_j_min_extents)},
{NAME("max_extents"), CTL(stats_arenas_i_hpa_shard_alloc_j_max_extents)},
{NAME("extents"), CTL(stats_arenas_i_hpa_shard_alloc_j_extents)},
{NAME("ps"), CTL(stats_arenas_i_hpa_shard_alloc_j_ps)},
{NAME("pages_per_ps"), CTL(stats_arenas_i_hpa_shard_alloc_j_pages_per_ps)},
{NAME("extents_per_ps"),
CTL(stats_arenas_i_hpa_shard_alloc_j_extents_per_ps)},
{NAME("total_elapsed_ns_per_ps"),
CTL(stats_arenas_i_hpa_shard_alloc_j_total_elapsed_ns_per_ps)}};
static const ctl_named_node_t super_stats_arenas_i_hpa_shard_alloc_j_node[] = {
{NAME(""), CHILD(named, stats_arenas_i_hpa_shard_alloc_j)}};
static const ctl_indexed_node_t stats_arenas_i_hpa_shard_alloc_node[] = {
{INDEX(stats_arenas_i_hpa_shard_alloc_j)}};
static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
{NAME("npageslabs"), CTL(stats_arenas_i_hpa_shard_npageslabs)},
{NAME("nactive"), CTL(stats_arenas_i_hpa_shard_nactive)},
@ -795,6 +818,8 @@ static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
{NAME("nhugify_failures"), CTL(stats_arenas_i_hpa_shard_nhugify_failures)},
{NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)},
{NAME("alloc"), CHILD(indexed, stats_arenas_i_hpa_shard_alloc)},
{NAME("full_slabs"), CHILD(named, stats_arenas_i_hpa_shard_full_slabs)},
{NAME("empty_slabs"), CHILD(named, stats_arenas_i_hpa_shard_empty_slabs)},
{NAME("nonfull_slabs"),
@ -2185,8 +2210,6 @@ CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t)
CTL_RO_NL_GEN(
opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra, size_t)
CTL_RO_NL_GEN(opt_huge_arena_pac_thp, opt_huge_arena_pac_thp, bool)
CTL_RO_NL_GEN(
opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], const char *)
@ -4100,6 +4123,36 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugify_failures,
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_alloc_j_min_extents,
arenas_i(mib[2])
->astats->hpastats.nonderived_stats.hpa_alloc_min_extents[mib[5]],
uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_alloc_j_max_extents,
arenas_i(mib[2])
->astats->hpastats.nonderived_stats.hpa_alloc_max_extents[mib[5]],
uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_alloc_j_extents,
arenas_i(mib[2])
->astats->hpastats.nonderived_stats.hpa_alloc_extents[mib[5]],
uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_alloc_j_ps,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.hpa_alloc_ps[mib[5]],
uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_alloc_j_pages_per_ps,
arenas_i(mib[2])
->astats->hpastats.nonderived_stats.hpa_alloc_pages_per_ps[mib[5]],
uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_alloc_j_extents_per_ps,
arenas_i(mib[2])
->astats->hpastats.nonderived_stats.hpa_alloc_extents_per_ps[mib[5]],
uint64_t);
CTL_RO_CGEN(config_stats,
stats_arenas_i_hpa_shard_alloc_j_total_elapsed_ns_per_ps,
arenas_i(mib[2])
->astats->hpastats.nonderived_stats
.hpa_alloc_total_elapsed_ns_per_ps[mib[5]],
uint64_t);
/* Full, nonhuge */
CTL_RO_CGEN(config_stats,
stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge,
@ -4193,6 +4246,15 @@ stats_arenas_i_hpa_shard_nonfull_slabs_j_index(
return super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node;
}
static const ctl_named_node_t *
stats_arenas_i_hpa_shard_alloc_j_index(
tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) {
if (j > SEC_MAX_NALLOCS) {
return NULL;
}
return super_stats_arenas_i_hpa_shard_alloc_j_node;
}
static bool
ctl_arenas_i_verify(size_t i) {
size_t a = arenas_i2a_impl(i, true, true);
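
Assuming a jemalloc build with stats enabled, the distributions wired up in
ctl.c above can also be read through the public mallctl() interface. The
dotted stat names below follow the node definitions in this diff; everything
else (arena index 0, the loop) is an illustrative sketch, not code from the
patch.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Refresh jemalloc's stats snapshot before reading. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	/* j ranges over [0, SEC_MAX_NALLOCS]; SEC_MAX_NALLOCS is 4. */
	for (size_t j = 0; j <= 4; j++) {
		char name[128];
		uint64_t nextents = 0;
		size_t len = sizeof(nextents);
		snprintf(name, sizeof(name),
		    "stats.arenas.0.hpa_shard.alloc.%zu.extents", j);
		if (mallctl(name, &nextents, &len, NULL, 0) == 0) {
			printf("hpa_alloc() batches that allocated %zu "
			    "extents: %" PRIu64 "\n", j, nextents);
		}
	}
	return 0;
}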

src/hpa.c

@ -98,6 +98,19 @@ hpa_shard_init(tsdn_t *tsdn, hpa_shard_t *shard, hpa_central_t *central,
shard->stats.nhugifies = 0;
shard->stats.nhugify_failures = 0;
shard->stats.ndehugifies = 0;
memset(shard->stats.hpa_alloc_min_extents, 0,
sizeof(shard->stats.hpa_alloc_min_extents));
memset(shard->stats.hpa_alloc_max_extents, 0,
sizeof(shard->stats.hpa_alloc_max_extents));
memset(shard->stats.hpa_alloc_extents, 0,
sizeof(shard->stats.hpa_alloc_extents));
memset(shard->stats.hpa_alloc_ps, 0, sizeof(shard->stats.hpa_alloc_ps));
memset(shard->stats.hpa_alloc_pages_per_ps, 0,
sizeof(shard->stats.hpa_alloc_pages_per_ps));
memset(shard->stats.hpa_alloc_extents_per_ps, 0,
sizeof(shard->stats.hpa_alloc_extents_per_ps));
memset(shard->stats.hpa_alloc_total_elapsed_ns_per_ps, 0,
sizeof(shard->stats.hpa_alloc_total_elapsed_ns_per_ps));
err = sec_init(tsdn, &shard->sec, base, sec_opts);
if (err) {
@ -123,6 +136,18 @@ hpa_shard_nonderived_stats_accum(
dst->nhugifies += src->nhugifies;
dst->nhugify_failures += src->nhugify_failures;
dst->ndehugifies += src->ndehugifies;
for (size_t i = 0; i <= SEC_MAX_NALLOCS; i++) {
dst->hpa_alloc_min_extents[i] += src->hpa_alloc_min_extents[i];
dst->hpa_alloc_max_extents[i] += src->hpa_alloc_max_extents[i];
dst->hpa_alloc_extents[i] += src->hpa_alloc_extents[i];
dst->hpa_alloc_ps[i] += src->hpa_alloc_ps[i];
dst->hpa_alloc_pages_per_ps[i] +=
src->hpa_alloc_pages_per_ps[i];
dst->hpa_alloc_extents_per_ps[i] +=
src->hpa_alloc_extents_per_ps[i];
dst->hpa_alloc_total_elapsed_ns_per_ps[i] +=
src->hpa_alloc_total_elapsed_ns_per_ps[i];
}
}
void
@ -629,37 +654,18 @@ hpa_shard_maybe_do_deferred_work(
}
static edata_t *
hpa_try_alloc_one_no_grow(
tsdn_t *tsdn, hpa_shard_t *shard, size_t size, bool *oom) {
hpa_try_alloc_one_offset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
hpdata_t *ps, hpdata_alloc_offset_t *alloc_offset, bool *oom) {
assert(*oom == false);
malloc_mutex_assert_owner(tsdn, &shard->mtx);
bool err;
edata_t *edata = edata_cache_fast_get(tsdn, &shard->ecf);
if (edata == NULL) {
*oom = true;
return NULL;
}
hpdata_t *ps = psset_pick_alloc(&shard->psset, size);
if (ps == NULL) {
edata_cache_fast_put(tsdn, &shard->ecf, edata);
return NULL;
}
psset_update_begin(&shard->psset, ps);
if (hpdata_empty(ps)) {
/*
* If the pageslab used to be empty, treat it as though it's
* brand new for fragmentation-avoidance purposes; what we're
* trying to approximate is the age of the allocations *in* that
* pageslab, and the allocations in the new pageslab are by
* definition the youngest in this hpa shard.
*/
hpdata_age_set(ps, shard->age_counter++);
}
void *addr = hpdata_reserve_alloc(ps, size);
void *addr = hpdata_reserve_alloc_offset(ps, size, alloc_offset);
JE_USDT(hpa_alloc, 5, shard->ind, addr, size, hpdata_nactive_get(ps),
hpdata_age_get(ps));
edata_init(edata, shard->ind, addr, size, /* slab */ false, SC_NSIZES,
@ -671,12 +677,12 @@ hpa_try_alloc_one_no_grow(
/*
* This could theoretically be moved outside of the critical section,
* but that introduces the potential for a race. Without the lock, the
* (initially nonempty, since this is the reuse pathway) pageslab we
* allocated out of could become otherwise empty while the lock is
* dropped. This would force us to deal with a pageslab eviction down
* the error pathway, which is a pain.
*/
err = emap_register_boundary(
const bool err = emap_register_boundary(
tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false);
if (err) {
hpdata_unreserve(
@ -693,32 +699,118 @@ hpa_try_alloc_one_no_grow(
* principle that we didn't *really* affect shard state (we
* tweaked the stats, but our tweaks weren't really accurate).
*/
psset_update_end(&shard->psset, ps);
edata_cache_fast_put(tsdn, &shard->ecf, edata);
*oom = true;
return NULL;
}
hpa_update_purge_hugify_eligibility(tsdn, shard, ps);
psset_update_end(&shard->psset, ps);
return edata;
}
static size_t
hpa_try_alloc_batch_no_grow_locked(tsdn_t *tsdn, hpa_shard_t *shard,
size_t size, bool *oom, size_t nallocs, edata_list_active_t *results,
hpa_try_alloc_from_one_ps(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
size_t max_nallocs, bool *oom, edata_list_active_t *results,
bool *deferred_work_generated) {
assert(size <= HUGEPAGE);
assert(size <= shard->opts.slab_max_alloc || size == sz_s2u(size));
assert(*oom == false);
malloc_mutex_assert_owner(tsdn, &shard->mtx);
nstime_t start;
nstime_init_update(&start);
hpdata_t *ps = psset_pick_alloc(&shard->psset, size);
if (ps == NULL) {
return 0;
}
assert(max_nallocs <= SEC_MAX_NALLOCS);
hpdata_alloc_offset_t alloc_offsets[SEC_MAX_NALLOCS];
const size_t nallocs = hpdata_find_alloc_offsets(
ps, size, alloc_offsets, max_nallocs);
psset_update_begin(&shard->psset, ps);
if (hpdata_empty(ps)) {
/*
* If the pageslab used to be empty, treat it as though it's
* brand new for fragmentation-avoidance purposes; what we're
* trying to approximate is the age of the allocations *in* that
* pageslab, and the allocations in the new pageslab are by
* definition the youngest in this hpa shard.
*/
hpdata_age_set(ps, shard->age_counter++);
}
size_t nsuccess = 0;
for (; nsuccess < nallocs; nsuccess++) {
edata_t *edata = hpa_try_alloc_one_no_grow(
tsdn, shard, size, oom);
for (; nsuccess < nallocs; nsuccess += 1) {
edata_t *edata = hpa_try_alloc_one_offset(
tsdn, shard, size, ps, (alloc_offsets + nsuccess), oom);
if (edata == NULL) {
break;
}
edata_list_active_append(results, edata);
}
hpdata_post_reserve_alloc_offsets(ps, size, alloc_offsets, nsuccess);
hpa_update_purge_hugify_eligibility(tsdn, shard, ps);
psset_update_end(&shard->psset, ps);
const uint64_t elapsed_ns = nstime_ns_since(&start);
assert(nsuccess <= SEC_MAX_NALLOCS);
shard->stats.hpa_alloc_pages_per_ps[nsuccess] += nsuccess
* (size >> LG_PAGE);
shard->stats.hpa_alloc_extents_per_ps[nsuccess] += 1;
shard->stats.hpa_alloc_total_elapsed_ns_per_ps[nsuccess] += elapsed_ns;
return nsuccess;
}
static size_t
hpa_try_alloc_batch_no_grow_locked(tsdn_t *tsdn, hpa_shard_t *shard,
size_t size, size_t min_nallocs, size_t max_nallocs,
bool update_min_max_stats, bool *oom, edata_list_active_t *results,
bool *deferred_work_generated) {
assert(*oom == false);
malloc_mutex_assert_owner(tsdn, &shard->mtx);
/*
* Updating the stats requires the shard mtx, so we only do the
* update the first time this function is called from
* hpa_alloc_batch_psset().
*/
if (update_min_max_stats) {
assert(min_nallocs <= SEC_MAX_NALLOCS);
shard->stats.hpa_alloc_min_extents[min_nallocs] += 1;
assert(max_nallocs <= SEC_MAX_NALLOCS);
shard->stats.hpa_alloc_max_extents[max_nallocs] += 1;
}
size_t nsuccess = 0;
size_t ps_count = 0;
while (true) {
assert(1 <= min_nallocs);
assert(nsuccess < min_nallocs);
assert(min_nallocs <= max_nallocs);
const size_t nallocs = hpa_try_alloc_from_one_ps(tsdn, shard,
size, max_nallocs - nsuccess, oom, results,
deferred_work_generated);
if (nallocs == 0 || *oom) {
break;
}
nsuccess += nallocs;
ps_count += 1;
if (min_nallocs <= nsuccess) {
break;
}
}
assert(nsuccess <= SEC_MAX_NALLOCS);
shard->stats.hpa_alloc_extents[nsuccess] += 1;
assert(ps_count <= SEC_MAX_NALLOCS);
shard->stats.hpa_alloc_ps[ps_count] += 1;
hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
*deferred_work_generated = hpa_shard_has_deferred_work(tsdn, shard);
return nsuccess;
@ -726,27 +818,26 @@ hpa_try_alloc_batch_no_grow_locked(tsdn_t *tsdn, hpa_shard_t *shard,
static size_t
hpa_try_alloc_batch_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
bool *oom, size_t nallocs, edata_list_active_t *results,
bool *deferred_work_generated) {
size_t min_nallocs, size_t max_nallocs, bool update_min_max_stats,
bool *oom, edata_list_active_t *results, bool *deferred_work_generated) {
malloc_mutex_lock(tsdn, &shard->mtx);
size_t nsuccess = hpa_try_alloc_batch_no_grow_locked(
tsdn, shard, size, oom, nallocs, results, deferred_work_generated);
const size_t nsuccess = hpa_try_alloc_batch_no_grow_locked(tsdn, shard,
size, min_nallocs, max_nallocs, update_min_max_stats, oom, results,
deferred_work_generated);
malloc_mutex_unlock(tsdn, &shard->mtx);
return nsuccess;
}
static size_t
hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
size_t nallocs, edata_list_active_t *results,
size_t min_nallocs, size_t max_nallocs, edata_list_active_t *results,
bool *deferred_work_generated) {
assert(size <= HUGEPAGE);
assert(size <= shard->opts.slab_max_alloc || size == sz_s2u(size));
bool oom = false;
size_t nsuccess = hpa_try_alloc_batch_no_grow(
tsdn, shard, size, &oom, nallocs, results, deferred_work_generated);
if (nsuccess == nallocs || oom) {
size_t nsuccess = hpa_try_alloc_batch_no_grow(tsdn, shard, size,
min_nallocs, max_nallocs, /* update_min_max_stats */ true, &oom,
results, deferred_work_generated);
if (min_nallocs <= nsuccess || oom) {
return nsuccess;
}
@ -755,13 +846,18 @@ hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
* try to grow.
*/
malloc_mutex_lock(tsdn, &shard->grow_mtx);
/*
* Check for grow races; maybe some earlier thread expanded the psset
* in between when we dropped the main mutex and grabbed the grow mutex.
*/
nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
nallocs - nsuccess, results, deferred_work_generated);
if (nsuccess == nallocs || oom) {
assert(nsuccess < min_nallocs);
assert(min_nallocs <= max_nallocs);
nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size,
min_nallocs - nsuccess, max_nallocs - nsuccess,
/* update_min_max_stats */ false, &oom, results,
deferred_work_generated);
if (min_nallocs <= nsuccess || oom) {
malloc_mutex_unlock(tsdn, &shard->grow_mtx);
return nsuccess;
}
@ -785,14 +881,14 @@ hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
*/
malloc_mutex_lock(tsdn, &shard->mtx);
psset_insert(&shard->psset, ps);
nsuccess += hpa_try_alloc_batch_no_grow_locked(tsdn, shard, size, &oom,
nallocs - nsuccess, results, deferred_work_generated);
assert(nsuccess < min_nallocs);
assert(min_nallocs <= max_nallocs);
nsuccess += hpa_try_alloc_batch_no_grow_locked(tsdn, shard, size,
min_nallocs - nsuccess, max_nallocs - nsuccess,
/* update_min_max_stats */ false, &oom, results,
deferred_work_generated);
malloc_mutex_unlock(tsdn, &shard->mtx);
/*
* Drop grow_mtx before doing deferred work; other threads blocked on it
* should be allowed to proceed while we're working.
*/
malloc_mutex_unlock(tsdn, &shard->grow_mtx);
return nsuccess;
@ -855,13 +951,13 @@ hpa_alloc(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, size_t alignment,
if (edata != NULL) {
return edata;
}
size_t nallocs = sec_size_supported(&shard->sec, size)
? shard->sec.opts.batch_fill_extra + 1
: 1;
edata_list_active_t results;
edata_list_active_init(&results);
size_t nsuccess = hpa_alloc_batch_psset(
tsdn, shard, size, nallocs, &results, deferred_work_generated);
size_t min_nallocs, max_nallocs;
sec_calc_nallocs_for_size(
&shard->sec, size, &min_nallocs, &max_nallocs);
size_t nsuccess = hpa_alloc_batch_psset(tsdn, shard, size, min_nallocs,
max_nallocs, &results, deferred_work_generated);
hpa_assert_results(tsdn, shard, &results);
edata = edata_list_active_first(&results);

@ -170,6 +170,171 @@ hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) {
hpdata_assert_consistent(hpdata);
}
size_t
hpdata_find_alloc_offsets(hpdata_t *hpdata, size_t sz,
hpdata_alloc_offset_t *offsets, size_t max_nallocs) {
hpdata_assert_consistent(hpdata);
assert((sz & PAGE_MASK) == 0);
assert(1 <= max_nallocs);
const size_t npages = sz >> LG_PAGE;
/* We should be able to find at least one allocation */
assert(npages <= hpdata_longest_free_range_get(hpdata));
size_t nallocs = 0;
size_t start = 0;
size_t longest_len = 0;
while (true) {
size_t begin = 0;
size_t len = 0;
const bool found = fb_urange_iter(
hpdata->active_pages, HUGEPAGE_PAGES, start, &begin, &len);
if (!found) {
/* we should have found at least one */
assert(0 < nallocs);
break;
}
/* carve up the free range, if it's large enough */
while (npages <= len) {
offsets->len_before = len;
offsets->index = begin;
offsets->longest_len = longest_len;
offsets += 1;
nallocs += 1;
if (nallocs == max_nallocs) {
/* Force start == HUGEPAGE_PAGES to break out of the outer loop. */
begin = HUGEPAGE_PAGES;
len = 0;
break;
}
begin += npages;
len -= npages;
}
start = begin + len;
assert(start <= HUGEPAGE_PAGES);
if (start == HUGEPAGE_PAGES) {
break;
}
longest_len = max_zu(longest_len, len);
}
/* post-conditions */
assert(1 <= nallocs);
assert(nallocs <= max_nallocs);
return nallocs;
}
void *
hpdata_reserve_alloc_offset(
hpdata_t *hpdata, size_t sz, hpdata_alloc_offset_t *offset) {
/*
* This is a metadata change; the hpdata should therefore either not be
* in the psset, or should have explicitly marked itself as being
* mid-update.
*/
assert(!hpdata->h_in_psset || hpdata->h_updating);
assert(hpdata->h_alloc_allowed);
assert((sz & PAGE_MASK) == 0);
const size_t npages = sz >> LG_PAGE;
const size_t index = offset->index;
fb_set_range(hpdata->active_pages, HUGEPAGE_PAGES, index, npages);
hpdata->h_nactive += npages;
/*
* We might be about to dirty some memory for the first time; update our
* count if so.
*/
size_t new_dirty = fb_ucount(
hpdata->touched_pages, HUGEPAGE_PAGES, index, npages);
fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, index, npages);
hpdata->h_ntouched += new_dirty;
return (void *)((byte_t *)hpdata_addr_get(hpdata) + (index << LG_PAGE));
}
void
hpdata_post_reserve_alloc_offsets(hpdata_t *hpdata, size_t sz,
hpdata_alloc_offset_t *offsets, size_t nallocs) {
assert((sz & PAGE_MASK) == 0);
const size_t npages = sz >> LG_PAGE;
if (nallocs == 0) {
return;
}
size_t max_len = offsets[0].len_before;
for (size_t i = 1; i < nallocs; i += 1) {
max_len = max_zu(max_len, offsets[i].len_before);
}
const size_t prev_longest = hpdata_longest_free_range_get(hpdata);
assert(max_len <= prev_longest);
if (max_len < prev_longest) {
/* no need to update the hpdata longest range */
return;
}
/*
* If we allocated out of a range that was the longest in the hpdata, it
* might be the only one of that size and we'll have to adjust the
* metadata.
*/
const size_t len_before = offsets[nallocs - 1].len_before;
size_t start = offsets[nallocs - 1].index + len_before;
size_t longest_len = max_zu(
offsets[nallocs - 1].longest_len, len_before - npages);
const size_t rest = HUGEPAGE_PAGES - start;
/*
* Only look at the rest if we think we'll find a range longer than what
* we already have. This also implicitly checks for rest == 0, so we don't
* have to check before the first call to fb_urange_iter().
*/
if (longest_len < rest) {
while (true) {
size_t begin = 0;
size_t len = 0;
const bool found = fb_urange_iter(hpdata->active_pages,
HUGEPAGE_PAGES, start, &begin, &len);
if (!found) {
break;
}
if (longest_len < len) {
if (len == prev_longest) {
/* it's already set to the right value */
assert(hpdata_longest_free_range_get(
hpdata)
== len);
assert(fb_urange_longest(
hpdata->active_pages,
HUGEPAGE_PAGES)
== len);
return;
}
longest_len = len;
}
start = begin + len;
assert(start <= HUGEPAGE_PAGES);
if (start == HUGEPAGE_PAGES) {
break;
}
}
}
assert(fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES)
== longest_len);
hpdata_longest_free_range_set(hpdata, longest_len);
}
size_t
hpdata_purge_begin(
hpdata_t *hpdata, hpdata_purge_state_t *purge_state, size_t *nranges) {

@ -162,13 +162,13 @@ void (*JET_MUTABLE junk_free_callback)(
void *ptr, size_t size) = &default_junk_free;
void (*JET_MUTABLE invalid_conf_abort)(void) = &abort;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_experimental_infallible_new = false;
bool opt_experimental_tcache_gc = true;
bool opt_zero = false;
unsigned opt_narenas = 0;
fxp_t opt_narenas_ratio = FXP_INIT_INT(4);
unsigned ncpus;
@ -208,7 +208,6 @@ typedef struct {
# define UTRACE(a, b, c)
#endif
/******************************************************************************/
/*
* Begin miscellaneous support functions.

@ -90,6 +90,35 @@ sec_bin_pick(sec_t *sec, uint8_t shard, pszind_t pszind) {
return &sec->bins[ind];
}
void
sec_calc_nallocs_for_size(
sec_t *sec, size_t size, size_t *min_nallocs_ret, size_t *max_nallocs_ret) {
size_t min_nallocs = 1;
size_t max_nallocs = 1;
if (sec_size_supported(sec, size)) {
/*
* This attempts to fill up to 1/SEC_MAX_BYTES_DIV of the SEC.
* If we go much over that, we might cause purging.
* This is mainly an issue when max_bytes is small (256K)
* and size is large. For larger max_bytes, we will
* almost always end up with SEC_MAX_NALLOCS.
*/
size_t nallocs = sec->opts.max_bytes / size / SEC_MAX_BYTES_DIV;
nallocs = max_zu(nallocs, SEC_MIN_NALLOCS);
min_nallocs = SEC_MIN_NALLOCS;
max_nallocs = min_zu(nallocs, SEC_MAX_NALLOCS);
}
/* post-conditions */
assert(1 <= min_nallocs);
assert(min_nallocs <= max_nallocs);
assert(max_nallocs <= SEC_MAX_NALLOCS);
*min_nallocs_ret = min_nallocs;
*max_nallocs_ret = max_nallocs;
}
static edata_t *
sec_bin_alloc_locked(tsdn_t *tsdn, sec_t *sec, sec_bin_t *bin, size_t size) {
malloc_mutex_assert_owner(tsdn, &bin->mtx);

@ -902,9 +902,7 @@ stats_arena_hpa_shard_counters_print(
" / sec)\n"
" Hugify failures: %" FMTu64 " (%" FMTu64
" / sec)\n"
" Dehugifies: %" FMTu64 " (%" FMTu64
" / sec)\n"
"\n",
" Dehugifies: %" FMTu64 " (%" FMTu64 " / sec)\n",
npageslabs, npageslabs_huge, npageslabs_nonhuge, nactive,
nactive_huge, nactive_nonhuge, ndirty, ndirty_huge, ndirty_nonhuge,
nretained_nonhuge, npurge_passes,
@ -944,6 +942,82 @@ stats_arena_hpa_shard_counters_print(
emitter_json_kv(
emitter, "ndirty_huge", emitter_type_size, &ndirty_huge);
emitter_json_object_end(emitter); /* End "slabs" */
/* alloc_batch stats */
uint64_t hpa_alloc_min_extents[SEC_MAX_NALLOCS + 1];
uint64_t hpa_alloc_max_extents[SEC_MAX_NALLOCS + 1];
uint64_t hpa_alloc_extents[SEC_MAX_NALLOCS + 1];
uint64_t hpa_alloc_ps[SEC_MAX_NALLOCS + 1];
uint64_t hpa_alloc_pages_per_ps[SEC_MAX_NALLOCS + 1];
uint64_t hpa_alloc_extents_per_ps[SEC_MAX_NALLOCS + 1];
uint64_t hpa_alloc_total_elapsed_ns_per_ps[SEC_MAX_NALLOCS + 1];
size_t alloc_mib[CTL_MAX_DEPTH];
CTL_LEAF_PREPARE(alloc_mib, 0, "stats.arenas");
alloc_mib[2] = i;
CTL_LEAF_PREPARE(alloc_mib, 3, "hpa_shard.alloc");
for (size_t j = 0; j <= SEC_MAX_NALLOCS; j += 1) {
alloc_mib[5] = j;
CTL_LEAF(alloc_mib, 6, "min_extents", &hpa_alloc_min_extents[j],
uint64_t);
CTL_LEAF(alloc_mib, 6, "max_extents", &hpa_alloc_max_extents[j],
uint64_t);
CTL_LEAF(
alloc_mib, 6, "extents", &hpa_alloc_extents[j], uint64_t);
CTL_LEAF(alloc_mib, 6, "ps", &hpa_alloc_ps[j], uint64_t);
CTL_LEAF(alloc_mib, 6, "pages_per_ps",
&hpa_alloc_pages_per_ps[j], uint64_t);
CTL_LEAF(alloc_mib, 6, "extents_per_ps",
&hpa_alloc_extents_per_ps[j], uint64_t);
CTL_LEAF(alloc_mib, 6, "total_elapsed_ns_per_ps",
&hpa_alloc_total_elapsed_ns_per_ps[j], uint64_t);
}
emitter_table_printf(emitter, " extent allocation distribution:\n");
emitter_table_printf(emitter,
" %4s %20s %20s %20s %20s %20s %20s %24s %24s\n", "",
"min_extents", "max_extents",
"extents", "ps", "pages_per_ps", "extents_per_ps",
"total_elapsed_ns_per_ps", "elapsed_ns_per_ps");
for (size_t j = 0; j <= SEC_MAX_NALLOCS; j += 1) {
const uint64_t extents_per_ps = hpa_alloc_extents_per_ps[j];
const uint64_t total_elapsed_ns_per_ps =
hpa_alloc_total_elapsed_ns_per_ps[j];
const uint64_t elapsed_ns_per_ps = (extents_per_ps != 0)
? (total_elapsed_ns_per_ps / extents_per_ps)
: 0;
emitter_table_printf(emitter,
" %4zu %20" FMTu64 " %20" FMTu64 " %20" FMTu64
" %20" FMTu64 " %20" FMTu64 " %20" FMTu64 " %24" FMTu64
" %24" FMTu64 "\n",
j, hpa_alloc_min_extents[j], hpa_alloc_max_extents[j],
hpa_alloc_extents[j],
hpa_alloc_ps[j], hpa_alloc_pages_per_ps[j], extents_per_ps,
total_elapsed_ns_per_ps, elapsed_ns_per_ps);
}
emitter_table_printf(emitter, "\n");
emitter_json_array_kv_begin(emitter, "extent_allocation_distribution");
for (size_t j = 0; j <= SEC_MAX_NALLOCS; j += 1) {
emitter_json_object_begin(emitter);
emitter_json_kv(emitter, "min_extents", emitter_type_uint64,
&hpa_alloc_min_extents[j]);
emitter_json_kv(emitter, "max_extents", emitter_type_uint64,
&hpa_alloc_max_extents[j]);
emitter_json_kv(emitter, "extents", emitter_type_uint64,
&hpa_alloc_extents[j]);
emitter_json_kv(
emitter, "ps", emitter_type_uint64, &hpa_alloc_ps[j]);
emitter_json_kv(emitter, "pages_per_ps", emitter_type_uint64,
&hpa_alloc_pages_per_ps[j]);
emitter_json_kv(emitter, "extents_per_ps", emitter_type_uint64,
&hpa_alloc_extents_per_ps[j]);
emitter_json_kv(emitter, "total_elapsed_ns_per_ps",
emitter_type_uint64, &hpa_alloc_total_elapsed_ns_per_ps[j]);
emitter_json_object_end(emitter);
}
emitter_json_array_end(emitter); /* End "extent_allocation_distribution" */
}
static void
@ -1687,7 +1761,6 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_SIZE_T("hpa_sec_nshards")
OPT_WRITE_SIZE_T("hpa_sec_max_alloc")
OPT_WRITE_SIZE_T("hpa_sec_max_bytes")
OPT_WRITE_SIZE_T("hpa_sec_batch_fill_extra")
OPT_WRITE_BOOL("huge_arena_pac_thp")
OPT_WRITE_CHAR_P("metadata_thp")
OPT_WRITE_INT64("mutex_max_spin")

@ -161,11 +161,13 @@ TEST_BEGIN(test_hpa_sec) {
sec_opts.nshards = 1;
sec_opts.max_alloc = 2 * PAGE;
sec_opts.max_bytes = NALLOCS * PAGE;
sec_opts.batch_fill_extra = 4;
hpa_shard_t *shard = create_test_data(&hooks, &opts, &sec_opts);
bool deferred_work_generated = false;
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
size_t min_nallocs, target_nallocs;
sec_calc_nallocs_for_size(
&shard->sec, PAGE, &min_nallocs, &target_nallocs);
/* Alloc 1 PAGE, confirm sec has the extra pages. */
edata_t *edata1 = hpa_alloc(tsdn, shard, PAGE, PAGE, false, false,
@ -174,38 +176,53 @@ TEST_BEGIN(test_hpa_sec) {
hpa_shard_stats_t hpa_stats;
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive,
1 + sec_opts.batch_fill_extra, "");
expect_zu_eq(hpa_stats.secstats.bytes, PAGE * sec_opts.batch_fill_extra,
"sec should have fill extra pages");
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, target_nallocs, "");
expect_zu_eq(hpa_stats.secstats.bytes, (target_nallocs - 1) * PAGE,
"sec should have extra pages");
expect_u64_eq(
hpa_stats.nonderived_stats.hpa_alloc_extents[target_nallocs],
(uint64_t)1, "");
/* Alloc/dealloc NALLOCS times and confirm extents are in sec. */
edata_t *edatas[NALLOCS];
size_t expected_nactive = NALLOCS + target_nallocs;
for (int i = 0; i < NALLOCS; i++) {
edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
false, false, &deferred_work_generated);
edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false, false,
false, &deferred_work_generated);
expect_ptr_not_null(edatas[i], "Unexpected null edata");
}
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, 2 + NALLOCS, "");
expect_zu_eq(hpa_stats.secstats.bytes, PAGE, "2 refills (at 0 and 4)");
expect_zu_eq(
hpa_stats.psset_stats.merged.nactive, expected_nactive, "");
expect_zu_eq(hpa_stats.secstats.bytes, (target_nallocs - 1) * PAGE,
"multiple refills (every target_nallocs allocations)");
const uint64_t expected_nsuccesses =
(uint64_t)((NALLOCS + 1 + target_nallocs - 1) / target_nallocs);
expect_u64_eq(
hpa_stats.nonderived_stats.hpa_alloc_extents[target_nallocs],
expected_nsuccesses, "");
for (int i = 0; i < NALLOCS - 1; i++) {
hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
}
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, (2 + NALLOCS), "");
expect_zu_eq(
hpa_stats.psset_stats.merged.nactive, expected_nactive, "");
expect_zu_eq(
hpa_stats.secstats.bytes, sec_opts.max_bytes, "sec should be full");
/* this one should flush 1 + 0.25 * 8 = 3 extents */
/* this one should flush 1 + 0.25 * NALLOCS extents */
const size_t flushed_extents = 1 + NALLOCS / 4;
const size_t expected_nactive_minus_flushed = expected_nactive
- flushed_extents;
hpa_dalloc(tsdn, shard, edatas[NALLOCS - 1], &deferred_work_generated);
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, (NALLOCS - 1), "");
expect_zu_eq(hpa_stats.psset_stats.merged.ndirty, 3, "");
expect_zu_eq(hpa_stats.psset_stats.merged.nactive,
expected_nactive_minus_flushed, "");
expect_zu_eq(hpa_stats.psset_stats.merged.ndirty, flushed_extents, "");
expect_zu_eq(hpa_stats.secstats.bytes, 0.75 * sec_opts.max_bytes,
"sec should be full");
@ -215,7 +232,8 @@ TEST_BEGIN(test_hpa_sec) {
expect_ptr_not_null(edata2, "Unexpected null edata");
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, NALLOCS - 1, "");
expect_zu_eq(hpa_stats.psset_stats.merged.nactive,
expected_nactive_minus_flushed, "");
expect_zu_eq(hpa_stats.secstats.bytes, 0.75 * sec_opts.max_bytes - PAGE,
"sec should have max_bytes minus one page that just came from it");
@ -223,8 +241,9 @@ TEST_BEGIN(test_hpa_sec) {
hpa_dalloc(tsdn, shard, edata2, &deferred_work_generated);
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, NALLOCS - 1, "");
expect_zu_eq(hpa_stats.psset_stats.merged.ndirty, 3, "");
expect_zu_eq(hpa_stats.psset_stats.merged.nactive,
expected_nactive_minus_flushed, "");
expect_zu_eq(hpa_stats.psset_stats.merged.ndirty, flushed_extents, "");
expect_zu_eq(hpa_stats.secstats.bytes, 0.75 * sec_opts.max_bytes, "");
destroy_test_data(shard);

@ -312,7 +312,6 @@ TEST_BEGIN(test_mallctl_opt) {
TEST_MALLCTL_OPT(size_t, hpa_sec_nshards, always);
TEST_MALLCTL_OPT(size_t, hpa_sec_max_alloc, always);
TEST_MALLCTL_OPT(size_t, hpa_sec_max_bytes, always);
TEST_MALLCTL_OPT(size_t, hpa_sec_batch_fill_extra, always);
TEST_MALLCTL_OPT(ssize_t, experimental_hpa_max_purge_nhp, always);
TEST_MALLCTL_OPT(size_t, hpa_purge_threshold, always);
TEST_MALLCTL_OPT(uint64_t, hpa_min_purge_delay_ms, always);

@ -69,7 +69,6 @@ TEST_BEGIN(test_sec_fill) {
opts.nshards = 1;
opts.max_alloc = 2 * PAGE;
opts.max_bytes = 4 * PAGE;
opts.batch_fill_extra = 2;
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
test_data_init(tsdn, &tdata, &opts);
@ -114,7 +113,6 @@ TEST_BEGIN(test_sec_alloc) {
opts.nshards = 1;
opts.max_alloc = 2 * PAGE;
opts.max_bytes = 4 * PAGE;
opts.batch_fill_extra = 1;
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
test_data_init(tsdn, &tdata, &opts);