Add HPA deferral functionality.

David Goldblatt 2021-06-04 16:07:27 -07:00 committed by David Goldblatt
parent ace329d11b
commit 583284f2d9
7 changed files with 76 additions and 12 deletions

src/arena.c

@@ -461,6 +461,12 @@ arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}
/* Called from background threads. */
void
arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) {
arena_decay(tsdn, arena, true, false);
}
void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
bool generated_dirty;
@@ -1565,7 +1571,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
* so arena_hpa_global is not yet initialized.
*/
if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
if (pa_shard_enable_hpa(tsdn, &arena->pa_shard, &opt_hpa_opts,
hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
hpa_shard_opts.deferral_allowed = background_thread_enabled();
if (pa_shard_enable_hpa(tsdn, &arena->pa_shard, &hpa_shard_opts,
&opt_hpa_sec_opts)) {
goto label_error;
}

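As an aside (not part of the committed change): in the arena_new() hunk above, the global opt_hpa_opts is not mutated; instead a by-value copy is taken and only deferral_allowed is overridden from background_thread_enabled(), presumably so the user-configured defaults stay untouched while each shard picks up the current background-thread state. A minimal standalone sketch of that copy-then-override pattern follows; the types and names are simplified stand-ins, not jemalloc's.

/* Standalone sketch of the copy-then-override option pattern (stand-in types). */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	int some_tuning_param;   /* stand-in for the real HPA option fields */
	bool deferral_allowed;
} shard_opts_t;

/* User-configured defaults; left untouched. */
static const shard_opts_t global_opts = { .some_tuning_param = 4096,
    .deferral_allowed = false };

/* Hypothetical runtime state standing in for background_thread_enabled(). */
static bool background_threads_running = true;

static void
shard_init(const shard_opts_t *opts) {
	printf("deferral_allowed = %d\n", (int)opts->deferral_allowed);
}

int
main(void) {
	shard_opts_t opts = global_opts;                    /* by-value copy */
	opts.deferral_allowed = background_threads_running; /* runtime override */
	shard_init(&opts);
	return 0;
}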
src/background_thread.c

@@ -291,7 +291,7 @@ background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigne
if (!arena) {
continue;
}
arena_decay(tsdn, arena, true, false);
arena_do_deferred_work(tsdn, arena);
if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
/* Min interval will be used. */
continue;

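As an aside (not part of the committed change): the hunk above reroutes the background thread through the new arena_do_deferred_work() wrapper instead of calling arena_decay() directly. One plausible reading is that this gives the background thread a single per-arena entry point from which other deferred work, such as the HPA maintenance this commit adds, can be driven. A rough standalone sketch of that indirection, with stand-in names:

/* Standalone sketch of the per-arena deferred-work hook (stand-in types/names). */
#include <stddef.h>
#include <stdio.h>

typedef struct { int ind; } arena_t;

static void
arena_decay_stub(arena_t *arena) {
	printf("decaying arena %d\n", arena->ind);
}

/* Single entry point the background thread calls for each arena. */
static void
arena_do_deferred_work_stub(arena_t *arena) {
	arena_decay_stub(arena);
	/* Other deferred work (e.g. HPA purging/hugification) could hang off here. */
}

int
main(void) {
	arena_t a0 = { 0 }, a1 = { 1 };
	arena_t *arenas[] = { &a0, NULL, &a1 };
	for (size_t i = 0; i < sizeof(arenas) / sizeof(arenas[0]); i++) {
		if (arenas[i] == NULL) {
			continue;    /* mirrors the !arena check above */
		}
		arena_do_deferred_work_stub(arenas[i]);
	}
	return 0;
}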
src/hpa.c

@@ -426,17 +426,29 @@ hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) {
return true;
}
/*
* Execution of deferred work is forced if it's triggered by an explicit
* hpa_shard_do_deferred_work() call.
*/
static void
hpa_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard,
bool forced) {
bool hugified;
bool purged;
size_t nloop = 0;
/* Just *some* bound, to impose a worst-case latency bound. */
size_t maxloops = 100;
malloc_mutex_assert_owner(tsdn, &shard->mtx);
if (!forced && shard->opts.deferral_allowed) {
return;
}
/*
* If we're on a background thread, do work so long as there's work to
* be done. Otherwise, bound latency to not be *too* bad by doing at
* most a small fixed number of operations.
*/
size_t maxloops = (forced ? (size_t)-1 : 8);
do {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
hugified = hpa_try_hugify(tsdn, shard);
malloc_mutex_assert_owner(tsdn, &shard->mtx);
purged = false;
if (hpa_should_purge(tsdn, shard)) {
purged = hpa_try_purge(tsdn, shard);
@@ -528,7 +540,7 @@ hpa_try_alloc_batch_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
edata_list_active_append(results, edata);
}
hpa_do_deferred_work(tsdn, shard);
hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
malloc_mutex_unlock(tsdn, &shard->mtx);
return nsuccess;
}
@@ -740,7 +752,7 @@ hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list) {
edata_list_active_remove(list, edata);
hpa_dalloc_locked(tsdn, shard, edata);
}
hpa_do_deferred_work(tsdn, shard);
hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
malloc_mutex_unlock(tsdn, &shard->mtx);
}
@@ -800,6 +812,26 @@ hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
}
}
void
hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
bool deferral_allowed) {
malloc_mutex_lock(tsdn, &shard->mtx);
bool deferral_previously_allowed = shard->opts.deferral_allowed;
shard->opts.deferral_allowed = deferral_allowed;
if (deferral_previously_allowed && !deferral_allowed) {
hpa_shard_maybe_do_deferred_work(tsdn, shard,
/* forced */ true);
}
malloc_mutex_unlock(tsdn, &shard->mtx);
}
void
hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_lock(tsdn, &shard->mtx);
hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ true);
malloc_mutex_unlock(tsdn, &shard->mtx);
}
void
hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_prefork(tsdn, &shard->grow_mtx);

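As an aside (not part of the committed change): a self-contained model of the two behaviors the hpa.c hunks above introduce, using simplified stand-in types rather than jemalloc's real APIs. While deferral is allowed, the alloc/dalloc paths skip housekeeping entirely and leave it for a background thread; a forced call (an explicit hpa_shard_do_deferred_work(), or switching deferral off via hpa_shard_set_deferral_allowed()) drains everything, while unforced calls cap the number of operations to bound worst-case latency.

/*
 * Simplified standalone model of the deferral logic (stand-in types; "pending"
 * stands in for queued purge/hugification work, not jemalloc's real state).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
	bool deferral_allowed;
	size_t pending;
} shard_t;

static bool
do_one_op(shard_t *s) {
	if (s->pending == 0) {
		return false;
	}
	s->pending--;
	return true;
}

static void
maybe_do_deferred_work(shard_t *s, bool forced) {
	if (!forced && s->deferral_allowed) {
		return;    /* leave the work for a background thread */
	}
	/* Forced calls drain everything; unforced calls bound worst-case latency. */
	size_t maxloops = forced ? (size_t)-1 : 8;
	for (size_t i = 0; i < maxloops && do_one_op(s); i++) {
	}
}

static void
set_deferral_allowed(shard_t *s, bool allowed) {
	bool previously_allowed = s->deferral_allowed;
	s->deferral_allowed = allowed;
	/* Turning deferral off flushes whatever was deferred in the meantime. */
	if (previously_allowed && !allowed) {
		maybe_do_deferred_work(s, /* forced */ true);
	}
}

int
main(void) {
	shard_t s = { .deferral_allowed = true, .pending = 20 };
	maybe_do_deferred_work(&s, /* forced */ false);
	printf("pending after deferred call: %zu\n", s.pending);      /* 20 */
	set_deferral_allowed(&s, false);
	printf("pending after disabling deferral: %zu\n", s.pending); /* 0 */
	return 0;
}

In the actual diff, the same structure appears under shard->mtx, with the unforced cap written as maxloops = (forced ? (size_t)-1 : 8).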
src/jemalloc.c

@@ -1788,8 +1788,10 @@ malloc_init_hard_a0_locked() {
opt_hpa = false;
}
} else if (opt_hpa) {
if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard, &opt_hpa_opts,
&opt_hpa_sec_opts)) {
hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
hpa_shard_opts.deferral_allowed = background_thread_enabled();
if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard,
&hpa_shard_opts, &opt_hpa_sec_opts)) {
return true;
}
}