diff --git a/src/hpa.c b/src/hpa.c index 99d1f033..6b1ae2ce 100644 --- a/src/hpa.c +++ b/src/hpa.c @@ -537,9 +537,16 @@ hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard, purged = false; while (hpa_should_purge(tsdn, shard) && nops < max_ops) { purged = hpa_try_purge(tsdn, shard); - if (purged) { - nops++; + if (!purged) { + /* + * It is fine if we couldn't purge as sometimes + * we try to purge just to unblock + * hugification, but there may be no dirty + * pages at all at the moment. + */ + break; } + nops++; } hugified = hpa_try_hugify(tsdn, shard); if (hugified) { diff --git a/test/unit/hpa.c b/test/unit/hpa.c index 9e3160b4..a8a26e13 100644 --- a/test/unit/hpa.c +++ b/test/unit/hpa.c @@ -24,7 +24,7 @@ struct test_data_s { static hpa_shard_opts_t test_hpa_shard_opts_default = { /* slab_max_alloc */ ALLOC_MAX, - /* hugification threshold */ + /* hugification_threshold */ HUGEPAGE, /* dirty_mult */ FXP_INIT_PERCENT(25), @@ -36,6 +36,21 @@ static hpa_shard_opts_t test_hpa_shard_opts_default = { 5 * 1000, }; +static hpa_shard_opts_t test_hpa_shard_opts_purge = { + /* slab_max_alloc */ + HUGEPAGE, + /* hugification_threshold */ + 0.9 * HUGEPAGE, + /* dirty_mult */ + FXP_INIT_PERCENT(11), + /* deferral_allowed */ + true, + /* hugify_delay_ms */ + 0, + /* min_purge_interval_ms */ + 5 * 1000, +}; + static hpa_shard_t * create_test_data(const hpa_hooks_t *hooks, hpa_shard_opts_t *opts) { bool err; @@ -452,6 +467,36 @@ TEST_BEGIN(test_defer_time) { } TEST_END +TEST_BEGIN(test_purge_no_infinite_loop) { + test_skip_if(!hpa_supported()); + + hpa_shard_t *shard = create_test_data(&hpa_hooks_default, + &test_hpa_shard_opts_purge); + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + + /* + * This is not an arbitrary value; it is chosen to meet the hugification + * criteria for a huge page and at the same time not allow the page to + * be hugified without triggering a purge. 
+ */ + const size_t npages = + test_hpa_shard_opts_purge.hugification_threshold / PAGE + 1; + const size_t size = npages * PAGE; + + bool deferred_work_generated = false; + edata_t *edata = pai_alloc(tsdn, &shard->pai, size, PAGE, + /* zero */ false, /* guarded */ false, /* frequent_reuse */ false, + &deferred_work_generated); + expect_ptr_not_null(edata, "Unexpected alloc failure"); + + hpa_shard_do_deferred_work(tsdn, shard); + + /* hpa_shard_do_deferred_work should not get stuck in a purging loop */ + + destroy_test_data(shard); +} +TEST_END + int main(void) { /* @@ -470,5 +515,6 @@ main(void) { test_alloc_max, test_stress, test_alloc_dalloc_batch, - test_defer_time); + test_defer_time, + test_purge_no_infinite_loop); }