From 867c6dd7dc88adb0489b8b815dd70c68807325fc Mon Sep 17 00:00:00 2001 From: Dmitry Ilvokhin Date: Fri, 7 Jun 2024 05:32:24 -0700 Subject: [PATCH] Option to guard `hpa_min_purge_interval_ms` fix The change in `hpa_min_purge_interval_ms` handling logic is not backward compatible, as it might increase memory usage. Now this logic is guarded by the `hpa_strict_min_purge_interval` option. When `hpa_strict_min_purge_interval` is true, we will purge no more frequently than once every `hpa_min_purge_interval_ms`. When `hpa_strict_min_purge_interval` is false, the old purging logic behaviour is preserved. The long-term strategy is to migrate all users of hpa to the new logic and then delete the `hpa_strict_min_purge_interval` option. --- include/jemalloc/internal/hpa_opts.h | 12 +++++++++++- src/ctl.c | 4 ++++ src/hpa.c | 10 ++++++---- src/jemalloc.c | 4 ++++ src/stats.c | 1 + test/unit/hpa_background_thread.sh | 2 +- 6 files changed, 27 insertions(+), 6 deletions(-) diff --git a/include/jemalloc/internal/hpa_opts.h b/include/jemalloc/internal/hpa_opts.h index 6e58c86b..93add641 100644 --- a/include/jemalloc/internal/hpa_opts.h +++ b/include/jemalloc/internal/hpa_opts.h @@ -49,6 +49,14 @@ struct hpa_shard_opts_s { * Minimum amount of time between purges. */ uint64_t min_purge_interval_ms; + + /* + * Strictly respect minimum amount of time between purges. + * + * This is an option to provide backward compatibility for a staged rollout of the + * purging logic fix. 
+ */ + bool strict_min_purge_interval; }; #define HPA_SHARD_OPTS_DEFAULT { \ @@ -69,7 +77,9 @@ struct hpa_shard_opts_s { /* hugify_delay_ms */ \ 10 * 1000, \ /* min_purge_interval_ms */ \ - 5 * 1000 \ + 5 * 1000, \ + /* strict_min_purge_interval */ \ + false \ } #endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */ diff --git a/src/ctl.c b/src/ctl.c index 4347dd2b..62589d77 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -103,6 +103,7 @@ CTL_PROTO(opt_hpa_slab_max_alloc) CTL_PROTO(opt_hpa_hugification_threshold) CTL_PROTO(opt_hpa_hugify_delay_ms) CTL_PROTO(opt_hpa_min_purge_interval_ms) +CTL_PROTO(opt_hpa_strict_min_purge_interval) CTL_PROTO(opt_hpa_dirty_mult) CTL_PROTO(opt_hpa_sec_nshards) CTL_PROTO(opt_hpa_sec_max_alloc) @@ -459,6 +460,7 @@ static const ctl_named_node_t opt_node[] = { CTL(opt_hpa_hugification_threshold)}, {NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)}, {NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)}, + {NAME("hpa_strict_min_purge_interval"), CTL(opt_hpa_strict_min_purge_interval)}, {NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)}, {NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)}, {NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)}, @@ -2193,6 +2195,8 @@ CTL_RO_NL_GEN(opt_hpa_hugification_threshold, CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t) CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms, uint64_t) +CTL_RO_NL_GEN(opt_hpa_strict_min_purge_interval, + opt_hpa_opts.strict_min_purge_interval, bool) /* * This will have to change before we publicly document this option; fxp_t and diff --git a/src/hpa.c b/src/hpa.c index fe925ad4..49d6b037 100644 --- a/src/hpa.c +++ b/src/hpa.c @@ -382,10 +382,12 @@ hpa_try_purge(tsdn_t *tsdn, hpa_shard_t *shard) { * Make sure we respect purge interval setting and don't purge * too frequently. 
*/ - uint64_t since_last_purge_ms = shard->central->hooks.ms_since( - &shard->last_purge); - if (since_last_purge_ms < shard->opts.min_purge_interval_ms) { - return false; + if (shard->opts.strict_min_purge_interval) { + uint64_t since_last_purge_ms = shard->central->hooks.ms_since( + &shard->last_purge); + if (since_last_purge_ms < shard->opts.min_purge_interval_ms) { + return false; + } } hpdata_t *to_purge = psset_pick_purge(&shard->psset); diff --git a/src/jemalloc.c b/src/jemalloc.c index 89f4b29d..abd7540f 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1554,6 +1554,10 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], "hpa_min_purge_interval_ms", 0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false); + CONF_HANDLE_BOOL( + opt_hpa_opts.strict_min_purge_interval, + "hpa_strict_min_purge_interval"); + if (CONF_MATCH("hpa_dirty_mult")) { if (CONF_MATCH_VALUE("-1")) { opt_hpa_opts.dirty_mult = (fxp_t)-1; diff --git a/src/stats.c b/src/stats.c index 4df0ae62..726007f5 100644 --- a/src/stats.c +++ b/src/stats.c @@ -1564,6 +1564,7 @@ stats_general_print(emitter_t *emitter) { OPT_WRITE_SIZE_T("hpa_hugification_threshold") OPT_WRITE_UINT64("hpa_hugify_delay_ms") OPT_WRITE_UINT64("hpa_min_purge_interval_ms") + OPT_WRITE_BOOL("hpa_strict_min_purge_interval") if (je_mallctl("opt.hpa_dirty_mult", (void *)&u32v, &u32sz, NULL, 0) == 0) { /* diff --git a/test/unit/hpa_background_thread.sh b/test/unit/hpa_background_thread.sh index 33b70e19..666da8fc 100644 --- a/test/unit/hpa_background_thread.sh +++ b/test/unit/hpa_background_thread.sh @@ -1,4 +1,4 @@ #!/bin/sh -export MALLOC_CONF="hpa_dirty_mult:0.001,hpa_hugification_threshold_ratio:1.0,hpa_min_purge_interval_ms:50,hpa_sec_nshards:0" +export MALLOC_CONF="hpa_dirty_mult:0.001,hpa_hugification_threshold_ratio:1.0,hpa_min_purge_interval_ms:50,hpa_strict_min_purge_interval:true,hpa_sec_nshards:0"