HPA: Fix infinite purging loop
One of the conditions for starting a purge is that the `hpa_hugify_blocked_by_ndirty` function call returns true. This can happen even when the shard has no dirty memory at all, in which case the purging loop becomes an infinite loop. `hpa_hugify_blocked_by_ndirty` was introduced in 0f6c420, but at that time the purging loop had a different form and no additional `break` was required. The loop was rewritten in 6630c5989, but the extra exit condition was not added then. Repro code was shared by Patrik Dokoupil in [1]; I stripped it down to the minimum needed to reproduce the issue in the jemalloc unit tests.

[1]: https://github.com/jemalloc/jemalloc/pull/2533
parent: fa451de17f
commit: 47d69b4eab
2 changed files with 57 additions and 4 deletions
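To see the hang concretely, here is a minimal, self-contained sketch (stubbed predicates for illustration, not jemalloc code) of the loop shape before this commit: a failed purge left both loop conditions unchanged, so once `hpa_should_purge` stayed true while there was nothing to purge, the loop spun forever.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Stubs modeling the reported shard state (hypothetical, illustration
 * only): purging keeps being requested to unblock hugification, yet
 * there are zero dirty pages, so every purge attempt fails.
 */
static bool should_purge_stub(void) { return true; }
static bool try_purge_stub(void) { return false; }

int main(void) {
        size_t nops = 0;
        const size_t max_ops = 16;
        bool purged;

        /* Pre-fix loop shape from hpa_shard_maybe_do_deferred_work(). */
        while (should_purge_stub() && nops < max_ops) {
                purged = try_purge_stub();
                if (purged) {
                        nops++;
                }
                /*
                 * Without the break added by this commit, neither loop
                 * condition ever changes here, so the loop never exits.
                 */
                if (!purged) {
                        break;
                }
        }
        printf("escaped after %zu successful purges\n", nops);
        return 0;
}
```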
src/hpa.c (11 changed lines)

@@ -537,9 +537,16 @@ hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard,
 	purged = false;
 	while (hpa_should_purge(tsdn, shard) && nops < max_ops) {
 		purged = hpa_try_purge(tsdn, shard);
-		if (purged) {
-			nops++;
+		if (!purged) {
+			/*
+			 * It is fine if we couldn't purge, as
+			 * sometimes we try to purge just to unblock
+			 * hugification, but there may be no dirty
+			 * pages at all at the moment.
+			 */
+			break;
 		}
+		nops++;
 	}
 	hugified = hpa_try_hugify(tsdn, shard);
 	if (hugified) {
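Why can `hpa_should_purge` stay true with nothing to purge? Roughly (a paraphrase of the logic in src/hpa.c around this commit; the struct and exact expressions below are illustrative assumptions, not jemalloc's API), purging is requested either when the shard's dirty page count exceeds its budget, or when `hpa_hugify_blocked_by_ndirty` reports that hugifying the best candidate would blow that budget. The second condition can hold even with zero dirty pages:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified view of one shard's counters (illustrative, not jemalloc's). */
typedef struct {
        size_t ndirty;          /* dirty (purgeable) pages in the shard */
        size_t ndirty_max;      /* roughly dirty_mult * active pages */
        size_t hugify_retained; /* untouched pages of the hugify candidate */
        bool   has_hugify_candidate;
} shard_state_t;

/*
 * Hugifying a page backs its untouched runs, effectively turning them
 * dirty; hugification is blocked if that would overflow the budget.
 */
static bool hugify_blocked_by_ndirty(const shard_state_t *s) {
        if (!s->has_hugify_candidate) {
                return false;
        }
        return s->ndirty + s->hugify_retained > s->ndirty_max;
}

/* Purge when over the dirty budget, or to unblock hugification. */
static bool should_purge(const shard_state_t *s) {
        return s->ndirty > s->ndirty_max || hugify_blocked_by_ndirty(s);
}

/* A purge attempt can only succeed if something is actually dirty. */
static bool try_purge(const shard_state_t *s) {
        return s->ndirty > 0;
}

int main(void) {
        /*
         * The stuck state: nothing dirty, but the candidate's retained
         * pages alone already exceed the dirty budget.
         */
        shard_state_t s = {
                .ndirty = 0,
                .ndirty_max = 50,
                .hugify_retained = 51,
                .has_hugify_candidate = true,
        };
        printf("should_purge=%d, try_purge=%d\n",
            should_purge(&s), try_purge(&s));
        return 0;
}
```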
test/unit/hpa.c (50 changed lines)

@@ -24,7 +24,7 @@ struct test_data_s {
 static hpa_shard_opts_t test_hpa_shard_opts_default = {
 	/* slab_max_alloc */
 	ALLOC_MAX,
-	/* hugification threshold */
+	/* hugification_threshold */
 	HUGEPAGE,
 	/* dirty_mult */
 	FXP_INIT_PERCENT(25),
@@ -36,6 +36,21 @@ static hpa_shard_opts_t test_hpa_shard_opts_default = {
 	5 * 1000,
 };
 
+static hpa_shard_opts_t test_hpa_shard_opts_purge = {
+	/* slab_max_alloc */
+	HUGEPAGE,
+	/* hugification_threshold */
+	0.9 * HUGEPAGE,
+	/* dirty_mult */
+	FXP_INIT_PERCENT(11),
+	/* deferral_allowed */
+	true,
+	/* hugify_delay_ms */
+	0,
+	/* min_purge_interval_ms */
+	5 * 1000,
+};
+
 static hpa_shard_t *
 create_test_data(const hpa_hooks_t *hooks, hpa_shard_opts_t *opts) {
 	bool err;
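These options are tuned so the shard lands exactly in the stuck state. A worked example, assuming 4 KiB pages and 2 MiB huge pages (both assumptions; the test itself derives everything from the build's PAGE and HUGEPAGE, and jemalloc's fixed-point rounding may differ by a page or so):

```c
#include <stddef.h>
#include <stdio.h>

int main(void) {
        /* Assumed platform constants, not taken from the commit. */
        const size_t page = 4096;                 /* 4 KiB */
        const size_t hugepage = 2u * 1024 * 1024; /* 2 MiB = 512 pages */

        /* hugification_threshold = 0.9 * HUGEPAGE, as in the opts above. */
        const size_t threshold = (size_t)(0.9 * hugepage);

        /* The test allocates just over the threshold... */
        const size_t npages = threshold / page + 1;       /* 461 */
        /* ...leaving this many untouched pages in the huge page... */
        const size_t retained = hugepage / page - npages; /* 51 */
        /* ...while dirty_mult = 11% caps the dirty budget near: */
        const size_t ndirty_max = npages * 11 / 100;      /* 50 */

        /*
         * retained > ndirty_max: hugification is blocked by ndirty and a
         * purge is requested, yet ndirty is 0, so the pre-fix loop spun.
         */
        printf("npages=%zu retained=%zu ndirty_max=%zu\n",
            npages, retained, ndirty_max);
        return 0;
}
```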
@@ -452,6 +467,36 @@ TEST_BEGIN(test_defer_time) {
 }
 TEST_END
 
+TEST_BEGIN(test_purge_no_infinite_loop) {
+	test_skip_if(!hpa_supported());
+
+	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
+	    &test_hpa_shard_opts_purge);
+	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+
+	/*
+	 * This is not an arbitrary value: it is chosen to meet the
+	 * hugification criteria for a huge page while not allowing the
+	 * page to hugify without triggering a purge.
+	 */
+	const size_t npages =
+	    test_hpa_shard_opts_purge.hugification_threshold / PAGE + 1;
+	const size_t size = npages * PAGE;
+
+	bool deferred_work_generated = false;
+	edata_t *edata = pai_alloc(tsdn, &shard->pai, size, PAGE,
+	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
+	    &deferred_work_generated);
+	expect_ptr_not_null(edata, "Unexpected alloc failure");
+
+	hpa_shard_do_deferred_work(tsdn, shard);
+
+	/* hpa_shard_do_deferred_work should not get stuck in a purging loop. */
+
+	destroy_test_data(shard);
+}
+TEST_END
+
 int
 main(void) {
 	/*
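With this test in place the hang is caught directly: before the src/hpa.c change, `hpa_shard_do_deferred_work` never returns here. Assuming the usual jemalloc test harness, the test would typically be exercised by building the unit tests (`make tests_unit`) and running the `test/unit/hpa` binary, or as part of `make check`.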
@@ -470,5 +515,6 @@ main(void) {
 	    test_alloc_max,
 	    test_stress,
 	    test_alloc_dalloc_batch,
-	    test_defer_time);
+	    test_defer_time,
+	    test_purge_no_infinite_loop);
 }