mirror of
https://github.com/jemalloc/jemalloc.git
synced 2026-04-14 22:51:50 +03:00
Respect hpa_min_purge_interval_ms option
Currently, the hugepage-aware allocator (HPA) backend works together with the classic one, which serves as a fallback for allocations that are not yet supported. When background threads are enabled, the wake-up timing of the classic backend interferes with the HPA, because the HPA purging logic had no check to ensure it is not purging too frequently. If a background thread is running and `hpa_should_purge` returns true, we will purge even if the previous purge happened less than hpa_min_purge_interval_ms ago.
This commit is contained in:
parent
90c627edb7
commit
91a6d230db
2 changed files with 48 additions and 14 deletions
10
src/hpa.c
10
src/hpa.c
|
|
@ -378,6 +378,16 @@ static bool
|
|||
hpa_try_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
|
||||
malloc_mutex_assert_owner(tsdn, &shard->mtx);
|
||||
|
||||
/*
|
||||
* Make sure we respect purge interval setting and don't purge
|
||||
* too frequently.
|
||||
*/
|
||||
uint64_t since_last_purge_ms = shard->central->hooks.ms_since(
|
||||
&shard->last_purge);
|
||||
if (since_last_purge_ms < shard->opts.min_purge_interval_ms) {
|
||||
return false;
|
||||
}
|
||||
|
||||
hpdata_t *to_purge = psset_pick_purge(&shard->psset);
|
||||
if (to_purge == NULL) {
|
||||
return false;
|
||||
|
|
|
|||
|
|
@ -83,7 +83,36 @@ wait_until_thread_is_enabled(unsigned arena_id) {
|
|||
}
|
||||
|
||||
static void
|
||||
expect_purging(unsigned arena_ind, bool expect_deferred) {
|
||||
expect_purging(unsigned arena_ind) {
|
||||
size_t empty_ndirty = get_empty_ndirty(arena_ind);
|
||||
expect_zu_eq(0, empty_ndirty, "Expected arena to start unused.");
|
||||
|
||||
void *ptrs[2];
|
||||
ptrs[0] = mallocx(PAGE,
|
||||
MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind));
|
||||
ptrs[1] = mallocx(PAGE,
|
||||
MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind));
|
||||
|
||||
empty_ndirty = get_empty_ndirty(arena_ind);
|
||||
expect_zu_eq(0, empty_ndirty, "All pages should be active");
|
||||
|
||||
dallocx(ptrs[0], MALLOCX_TCACHE_NONE);
|
||||
expect_true(empty_ndirty == 0 || empty_ndirty == 1,
|
||||
"Unexpected extra dirty page count: %zu", empty_ndirty);
|
||||
|
||||
/*
|
||||
* Wait for at least hpa_min_purge_interval_ms to trigger purge on next
|
||||
* deallocation.
|
||||
*/
|
||||
sleep_for_background_thread_interval();
|
||||
|
||||
dallocx(ptrs[1], MALLOCX_TCACHE_NONE);
|
||||
empty_ndirty = get_empty_ndirty(arena_ind);
|
||||
expect_zu_eq(0, empty_ndirty, "There are should be no dirty pages");
|
||||
}
|
||||
|
||||
static void
|
||||
expect_deferred_purging(unsigned arena_ind) {
|
||||
size_t empty_ndirty;
|
||||
|
||||
empty_ndirty = get_empty_ndirty(arena_ind);
|
||||
|
|
@ -103,20 +132,15 @@ expect_purging(unsigned arena_ind, bool expect_deferred) {
|
|||
expect_zu_eq(0, empty_ndirty, "All pages should be active");
|
||||
dallocx(ptr, MALLOCX_TCACHE_NONE);
|
||||
empty_ndirty = get_empty_ndirty(arena_ind);
|
||||
if (expect_deferred) {
|
||||
expect_true(empty_ndirty == 0 || empty_ndirty == 1 ||
|
||||
opt_prof, "Unexpected extra dirty page count: %zu",
|
||||
empty_ndirty);
|
||||
} else {
|
||||
assert_zu_eq(0, empty_ndirty,
|
||||
"Saw dirty pages without deferred purging");
|
||||
}
|
||||
expect_true(empty_ndirty == 0 || empty_ndirty == 1 ||
|
||||
opt_prof, "Unexpected extra dirty page count: %zu",
|
||||
empty_ndirty);
|
||||
if (empty_ndirty > 0) {
|
||||
observed_dirty_page = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
expect_b_eq(expect_deferred, observed_dirty_page, "");
|
||||
expect_true(observed_dirty_page, "");
|
||||
|
||||
/*
|
||||
* Under high concurrency / heavy test load (e.g. using run_test.sh),
|
||||
|
|
@ -125,7 +149,7 @@ expect_purging(unsigned arena_ind, bool expect_deferred) {
|
|||
*/
|
||||
unsigned retry = 0;
|
||||
while ((empty_ndirty = get_empty_ndirty(arena_ind)) > 0 &&
|
||||
expect_deferred && (retry++ < 100)) {
|
||||
(retry++ < 100)) {
|
||||
sleep_for_background_thread_interval();
|
||||
}
|
||||
|
||||
|
|
@ -144,7 +168,7 @@ TEST_BEGIN(test_hpa_background_thread_purges) {
|
|||
* Our .sh sets dirty mult to 0, so all dirty pages should get purged
|
||||
* any time any thread frees.
|
||||
*/
|
||||
expect_purging(arena_ind, /* expect_deferred */ true);
|
||||
expect_deferred_purging(arena_ind);
|
||||
}
|
||||
TEST_END
|
||||
|
||||
|
|
@ -158,11 +182,11 @@ TEST_BEGIN(test_hpa_background_thread_enable_disable) {
|
|||
unsigned arena_ind = create_arena();
|
||||
|
||||
set_background_thread_enabled(false);
|
||||
expect_purging(arena_ind, false);
|
||||
expect_purging(arena_ind);
|
||||
|
||||
set_background_thread_enabled(true);
|
||||
wait_until_thread_is_enabled(arena_ind);
|
||||
expect_purging(arena_ind, true);
|
||||
expect_deferred_purging(arena_ind);
|
||||
}
|
||||
TEST_END
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue