Remove hpa_sec_batch_fill_extra and calculate nallocs dynamically.

This commit is contained in:
Tony Printezis 2026-03-02 11:11:09 -08:00
parent 6515df8cec
commit c0e2a93e4d
14 changed files with 268 additions and 98 deletions

View file

@ -161,26 +161,27 @@ TEST_BEGIN(test_hpa_sec) {
sec_opts.nshards = 1;
sec_opts.max_alloc = 2 * PAGE;
sec_opts.max_bytes = NALLOCS * PAGE;
sec_opts.batch_fill_extra = 4;
hpa_shard_t *shard = create_test_data(&hooks, &opts, &sec_opts);
bool deferred_work_generated = false;
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
const size_t target_nallocs = sec_calc_nallocs_for_size(
&shard->sec, PAGE);
/* alloc 1 PAGE, confirm sec has fill_extra bytes. */
/* Alloc 1 PAGE, confirm sec has the expected extra pages. */
edata_t *edata1 = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false,
false, &deferred_work_generated);
expect_ptr_not_null(edata1, "Unexpected null edata");
hpa_shard_stats_t hpa_stats;
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive,
1 + sec_opts.batch_fill_extra, "");
expect_zu_eq(hpa_stats.secstats.bytes, PAGE * sec_opts.batch_fill_extra,
"sec should have fill extra pages");
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, target_nallocs, "");
expect_zu_eq(hpa_stats.secstats.bytes, (target_nallocs - 1) * PAGE,
"sec should have extra pages");
/* Alloc/dealloc NALLOCS times and confirm extents are in sec. */
edata_t *edatas[NALLOCS];
size_t expected_nactive = NALLOCS + target_nallocs;
for (int i = 0; i < NALLOCS; i++) {
edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
false, false, &deferred_work_generated);
@ -188,8 +189,10 @@ TEST_BEGIN(test_hpa_sec) {
}
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, 2 + NALLOCS, "");
expect_zu_eq(hpa_stats.secstats.bytes, PAGE, "2 refills (at 0 and 4)");
expect_zu_eq(
hpa_stats.psset_stats.merged.nactive, expected_nactive, "");
expect_zu_eq(hpa_stats.secstats.bytes, (target_nallocs - 1) * PAGE,
"multiple refills (every target_nallocs allocations)");
for (int i = 0; i < NALLOCS - 1; i++) {
pai_dalloc(
@ -197,17 +200,22 @@ TEST_BEGIN(test_hpa_sec) {
}
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, (2 + NALLOCS), "");
expect_zu_eq(
hpa_stats.psset_stats.merged.nactive, expected_nactive, "");
expect_zu_eq(
hpa_stats.secstats.bytes, sec_opts.max_bytes, "sec should be full");
/* this one should flush 1 + 0.25 * 8 = 3 extents */
/* This one should flush 1 + 0.25 * NALLOCS extents. */
const size_t flushed_extends = 1 + NALLOCS / 4;
const size_t expected_native_minus_flushed = expected_nactive
- flushed_extends;
pai_dalloc(
tsdn, &shard->pai, edatas[NALLOCS - 1], &deferred_work_generated);
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, (NALLOCS - 1), "");
expect_zu_eq(hpa_stats.psset_stats.merged.ndirty, 3, "");
expect_zu_eq(hpa_stats.psset_stats.merged.nactive,
expected_native_minus_flushed, "");
expect_zu_eq(hpa_stats.psset_stats.merged.ndirty, flushed_extends, "");
expect_zu_eq(hpa_stats.secstats.bytes, 0.75 * sec_opts.max_bytes,
	    "sec should be 3/4 full after the flush");
@ -217,7 +225,8 @@ TEST_BEGIN(test_hpa_sec) {
expect_ptr_not_null(edata2, "Unexpected null edata");
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, NALLOCS - 1, "");
expect_zu_eq(hpa_stats.psset_stats.merged.nactive,
expected_native_minus_flushed, "");
expect_zu_eq(hpa_stats.secstats.bytes, 0.75 * sec_opts.max_bytes - PAGE,
"sec should have max_bytes minus one page that just came from it");
@ -225,8 +234,9 @@ TEST_BEGIN(test_hpa_sec) {
pai_dalloc(tsdn, &shard->pai, edata2, &deferred_work_generated);
memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
expect_zu_eq(hpa_stats.psset_stats.merged.nactive, NALLOCS - 1, "");
expect_zu_eq(hpa_stats.psset_stats.merged.ndirty, 3, "");
expect_zu_eq(hpa_stats.psset_stats.merged.nactive,
expected_native_minus_flushed, "");
expect_zu_eq(hpa_stats.psset_stats.merged.ndirty, flushed_extends, "");
expect_zu_eq(hpa_stats.secstats.bytes, 0.75 * sec_opts.max_bytes, "");
destroy_test_data(shard);

View file

@ -313,7 +313,6 @@ TEST_BEGIN(test_mallctl_opt) {
TEST_MALLCTL_OPT(size_t, hpa_sec_nshards, always);
TEST_MALLCTL_OPT(size_t, hpa_sec_max_alloc, always);
TEST_MALLCTL_OPT(size_t, hpa_sec_max_bytes, always);
TEST_MALLCTL_OPT(size_t, hpa_sec_batch_fill_extra, always);
TEST_MALLCTL_OPT(ssize_t, experimental_hpa_max_purge_nhp, always);
TEST_MALLCTL_OPT(size_t, hpa_purge_threshold, always);
TEST_MALLCTL_OPT(uint64_t, hpa_min_purge_delay_ms, always);

View file

@ -69,7 +69,6 @@ TEST_BEGIN(test_sec_fill) {
opts.nshards = 1;
opts.max_alloc = 2 * PAGE;
opts.max_bytes = 4 * PAGE;
opts.batch_fill_extra = 2;
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
test_data_init(tsdn, &tdata, &opts);
@ -114,7 +113,6 @@ TEST_BEGIN(test_sec_alloc) {
opts.nshards = 1;
opts.max_alloc = 2 * PAGE;
opts.max_bytes = 4 * PAGE;
opts.batch_fill_extra = 1;
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
test_data_init(tsdn, &tdata, &opts);