From 4f4fd424477142ee9962fcf4e4cd0349d4e6e4d3 Mon Sep 17 00:00:00 2001 From: Dmitry Ilvokhin Date: Thu, 29 Aug 2024 10:49:31 -0700 Subject: [PATCH] Remove `strict_min_purge_interval` option Option `experimental_hpa_strict_min_purge_interval` was expected to be temporary to simplify rollout of a bugfix. Now that the bugfix rollout is complete, it is safe to remove this option. --- include/jemalloc/internal/hpa_opts.h | 10 ---------- src/ctl.c | 5 ----- src/hpa.c | 11 +++-------- src/jemalloc.c | 4 ---- src/stats.c | 1 - test/unit/hpa.c | 29 +++++++++++++++++----------- test/unit/mallctl.c | 2 -- 7 files changed, 21 insertions(+), 41 deletions(-) diff --git a/include/jemalloc/internal/hpa_opts.h b/include/jemalloc/internal/hpa_opts.h index 15765689..ee2bd40c 100644 --- a/include/jemalloc/internal/hpa_opts.h +++ b/include/jemalloc/internal/hpa_opts.h @@ -50,14 +50,6 @@ struct hpa_shard_opts_s { */ uint64_t min_purge_interval_ms; - /* - * Strictly respect minimum amout of time between purges. - * - * This is an option to provide backward compatibility for staged rollout of - * purging logic fix. - */ - bool experimental_strict_min_purge_interval; - /* * Maximum number of hugepages to purge on each purging attempt. 
*/ @@ -83,8 +75,6 @@ struct hpa_shard_opts_s { 10 * 1000, \ /* min_purge_interval_ms */ \ 5 * 1000, \ - /* experimental_strict_min_purge_interval */ \ - false, \ /* experimental_max_purge_nhp */ \ -1 \ } diff --git a/src/ctl.c b/src/ctl.c index 8608f124..2a9e47f2 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -103,7 +103,6 @@ CTL_PROTO(opt_hpa_slab_max_alloc) CTL_PROTO(opt_hpa_hugification_threshold) CTL_PROTO(opt_hpa_hugify_delay_ms) CTL_PROTO(opt_hpa_min_purge_interval_ms) -CTL_PROTO(opt_experimental_hpa_strict_min_purge_interval) CTL_PROTO(opt_experimental_hpa_max_purge_nhp) CTL_PROTO(opt_hpa_dirty_mult) CTL_PROTO(opt_hpa_sec_nshards) @@ -462,8 +461,6 @@ static const ctl_named_node_t opt_node[] = { CTL(opt_hpa_hugification_threshold)}, {NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)}, {NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)}, - {NAME("experimental_hpa_strict_min_purge_interval"), - CTL(opt_experimental_hpa_strict_min_purge_interval)}, {NAME("experimental_hpa_max_purge_nhp"), CTL(opt_experimental_hpa_max_purge_nhp)}, {NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)}, @@ -2202,8 +2199,6 @@ CTL_RO_NL_GEN(opt_hpa_hugification_threshold, CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t) CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms, uint64_t) -CTL_RO_NL_GEN(opt_experimental_hpa_strict_min_purge_interval, - opt_hpa_opts.experimental_strict_min_purge_interval, bool) CTL_RO_NL_GEN(opt_experimental_hpa_max_purge_nhp, opt_hpa_opts.experimental_max_purge_nhp, ssize_t) diff --git a/src/hpa.c b/src/hpa.c index 3d7a6f60..d58a17ec 100644 --- a/src/hpa.c +++ b/src/hpa.c @@ -512,14 +512,9 @@ hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) { static bool hpa_min_purge_interval_passed(tsdn_t *tsdn, hpa_shard_t *shard) { malloc_mutex_assert_owner(tsdn, &shard->mtx); - if (shard->opts.experimental_strict_min_purge_interval) { - uint64_t since_last_purge_ms = 
shard->central->hooks.ms_since( - &shard->last_purge); - if (since_last_purge_ms < shard->opts.min_purge_interval_ms) { - return false; - } - } - return true; + uint64_t since_last_purge_ms = shard->central->hooks.ms_since( + &shard->last_purge); + return since_last_purge_ms >= shard->opts.min_purge_interval_ms; } /* diff --git a/src/jemalloc.c b/src/jemalloc.c index 63f6b302..428a50ef 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1571,10 +1571,6 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], "hpa_min_purge_interval_ms", 0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false); - CONF_HANDLE_BOOL( - opt_hpa_opts.experimental_strict_min_purge_interval, - "experimental_hpa_strict_min_purge_interval"); - CONF_HANDLE_SSIZE_T( opt_hpa_opts.experimental_max_purge_nhp, "experimental_hpa_max_purge_nhp", -1, SSIZE_MAX); diff --git a/src/stats.c b/src/stats.c index ef025eb3..d5be92d3 100644 --- a/src/stats.c +++ b/src/stats.c @@ -1564,7 +1564,6 @@ stats_general_print(emitter_t *emitter) { OPT_WRITE_SIZE_T("hpa_hugification_threshold") OPT_WRITE_UINT64("hpa_hugify_delay_ms") OPT_WRITE_UINT64("hpa_min_purge_interval_ms") - OPT_WRITE_BOOL("experimental_hpa_strict_min_purge_interval") OPT_WRITE_SSIZE_T("experimental_hpa_max_purge_nhp") if (je_mallctl("opt.hpa_dirty_mult", (void *)&u32v, &u32sz, NULL, 0) == 0) { diff --git a/test/unit/hpa.c b/test/unit/hpa.c index ae8a976c..747f98ef 100644 --- a/test/unit/hpa.c +++ b/test/unit/hpa.c @@ -34,8 +34,6 @@ static hpa_shard_opts_t test_hpa_shard_opts_default = { 10 * 1000, /* min_purge_interval_ms */ 5 * 1000, - /* experimental_strict_min_purge_interval */ - false, /* experimental_max_purge_nhp */ -1 }; @@ -53,8 +51,6 @@ static hpa_shard_opts_t test_hpa_shard_opts_purge = { 0, /* min_purge_interval_ms */ 5 * 1000, - /* experimental_strict_min_purge_interval */ - false, /* experimental_max_purge_nhp */ -1 }; @@ -506,7 +502,7 @@ TEST_BEGIN(test_purge_no_infinite_loop) { } TEST_END 
-TEST_BEGIN(test_no_experimental_strict_min_purge_interval) { +TEST_BEGIN(test_no_min_purge_interval) { test_skip_if(!hpa_supported()); hpa_hooks_t hooks; @@ -520,6 +516,7 @@ TEST_BEGIN(test_no_experimental_strict_min_purge_interval) { hpa_shard_opts_t opts = test_hpa_shard_opts_default; opts.deferral_allowed = true; + opts.min_purge_interval_ms = 0; hpa_shard_t *shard = create_test_data(&hooks, &opts); @@ -547,7 +544,7 @@ TEST_BEGIN(test_no_experimental_strict_min_purge_interval) { } TEST_END -TEST_BEGIN(test_experimental_strict_min_purge_interval) { +TEST_BEGIN(test_min_purge_interval) { test_skip_if(!hpa_supported()); hpa_hooks_t hooks; @@ -561,7 +558,6 @@ TEST_BEGIN(test_experimental_strict_min_purge_interval) { hpa_shard_opts_t opts = test_hpa_shard_opts_default; opts.deferral_allowed = true; - opts.experimental_strict_min_purge_interval = true; hpa_shard_t *shard = create_test_data(&hooks, &opts); @@ -631,6 +627,7 @@ TEST_BEGIN(test_purge) { pai_dalloc(tsdn, &shard->pai, edatas[i], &deferred_work_generated); } + nstime_init2(&defer_curtime, 6, 0); hpa_shard_do_deferred_work(tsdn, shard); expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early"); @@ -642,9 +639,15 @@ TEST_BEGIN(test_purge) { expect_zu_eq(2, ndefer_purge_calls, "Expect purges"); ndefer_purge_calls = 0; + nstime_init2(&defer_curtime, 12, 0); hpa_shard_do_deferred_work(tsdn, shard); - expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early"); + /* + * We still have 5 active hugepages, and by now they have been matching + * the hugification criteria long enough to actually hugify them. 
+ */ + expect_zu_eq(5, ndefer_hugify_calls, "Expect hugification"); + ndefer_hugify_calls = 0; expect_zu_eq(0, ndefer_dehugify_calls, "Dehugified too early"); /* * We still have completely dirty hugepage, but we are below @@ -691,6 +694,7 @@ TEST_BEGIN(test_experimental_max_purge_nhp) { pai_dalloc(tsdn, &shard->pai, edatas[i], &deferred_work_generated); } + nstime_init2(&defer_curtime, 6, 0); hpa_shard_do_deferred_work(tsdn, shard); expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early"); @@ -702,14 +706,17 @@ TEST_BEGIN(test_experimental_max_purge_nhp) { expect_zu_eq(1, ndefer_purge_calls, "Expect purges"); ndefer_purge_calls = 0; + nstime_init2(&defer_curtime, 12, 0); hpa_shard_do_deferred_work(tsdn, shard); - expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early"); + expect_zu_eq(5, ndefer_hugify_calls, "Expect hugification"); + ndefer_hugify_calls = 0; expect_zu_eq(0, ndefer_dehugify_calls, "Dehugified too early"); /* We still above the limit for dirty pages. */ expect_zu_eq(1, ndefer_purge_calls, "Expect purge"); ndefer_purge_calls = 0; + nstime_init2(&defer_curtime, 18, 0); hpa_shard_do_deferred_work(tsdn, shard); expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early"); @@ -741,8 +748,8 @@ main(void) { test_alloc_dalloc_batch, test_defer_time, test_purge_no_infinite_loop, - test_no_experimental_strict_min_purge_interval, - test_experimental_strict_min_purge_interval, + test_no_min_purge_interval, + test_min_purge_interval, test_purge, test_experimental_max_purge_nhp); } diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c index 028a27f7..ffe5c411 100644 --- a/test/unit/mallctl.c +++ b/test/unit/mallctl.c @@ -292,8 +292,6 @@ TEST_BEGIN(test_mallctl_opt) { TEST_MALLCTL_OPT(size_t, hpa_sec_max_bytes, always); TEST_MALLCTL_OPT(size_t, hpa_sec_bytes_after_flush, always); TEST_MALLCTL_OPT(size_t, hpa_sec_batch_fill_extra, always); - TEST_MALLCTL_OPT(bool, experimental_hpa_strict_min_purge_interval, - always); TEST_MALLCTL_OPT(ssize_t, 
experimental_hpa_max_purge_nhp, always); TEST_MALLCTL_OPT(unsigned, narenas, always); TEST_MALLCTL_OPT(const char *, percpu_arena, always);