mirror of https://github.com/jemalloc/jemalloc.git
Replace PAI vtable dispatch with direct calls
The pai_t interface implements C-style polymorphism via function pointers to
abstract over PAC and HPA. This abstraction provides no real benefit: only two
implementations exist, the dispatcher already knows which one it is calling,
and HPA stubs 2 of the 5 operations. Remove the runtime dispatch in favor of
direct calls.

This commit:
- Promotes pac_alloc/expand/shrink/dalloc/time_until_deferred_work to external
  linkage and replaces the pai_t *self parameter with pac_t *pac.
- Promotes hpa_alloc/expand/shrink/dalloc/time_until_deferred_work to external
  linkage and replaces pai_t *self with hpa_shard_t *shard.
- Updates hpa_dalloc_batch's signature to take hpa_shard_t * directly and
  removes the hpa_from_pai container-of helper. Updates internal callers in
  hpa_alloc, hpa_dalloc, and hpa_sec_flush_impl.
- Drops the vtable assignments from pac_init() and hpa_shard_init().
- Renames the pa_shard_t member hpa_shard to hpa.
- Replaces pai_alloc/dalloc/etc. dispatch in pa.c with direct calls. HPA
  expand and shrink (which are unconditional failure stubs) are skipped
  entirely for HPA-owned extents.
- Removes the pa_get_pai() helper.
- Updates tests in test/unit/hpa.c and test/unit/hpa_sec_integration.c to
  call hpa_alloc/dalloc/etc. directly.

The pai_t struct field stays as dead weight in pac_t and hpa_shard_t; it is
removed in the next commit along with pai.h itself.

No behavioral changes.
parent 163c871d6c
commit 1dfa6f7aa4
14 changed files with 183 additions and 224 deletions
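For context, here is a minimal sketch of the dispatch pattern being removed,
next to the direct call that replaces it. This is illustrative only: the
struct and function names echo the real pai_t/pac_t ones, but the signatures
are pared down (the real calls also carry tsdn_t, alignment, zero/guarded
flags, and the deferred-work out-parameter), and the bodies are stubs.

#include <stddef.h>

/* Vtable-style page-allocator interface (simplified pai_t). */
typedef struct pai_s pai_t;
struct pai_s {
    void *(*alloc)(pai_t *self, size_t size);
};

/* An implementation embeds the vtable and downcasts to recover itself. */
typedef struct pac_s {
    pai_t pai; /* must stay the first member for the cast below */
    int state; /* stand-in for the allocator's real state */
} pac_t;

static void *
pac_alloc_impl(pai_t *self, size_t size) {
    pac_t *pac = (pac_t *)self; /* the cast hpa_from_pai also performed */
    (void)pac;
    (void)size;
    return NULL; /* real body would allocate from pac's state */
}

/* Old style: dispatch through the function pointer. */
static inline void *
pai_alloc(pai_t *self, size_t size) {
    return self->alloc(self, size);
}

/* New style: external linkage, typed receiver, no indirection. */
void *
pac_alloc(pac_t *pac, size_t size) {
    (void)pac;
    (void)size;
    return NULL; /* same body as pac_alloc_impl, minus the cast */
}

int
main(void) {
    pac_t pac = {{&pac_alloc_impl}, 0};
    void *p = pai_alloc(&pac.pai, 4096); /* old: indirect dispatch */
    void *q = pac_alloc(&pac, 4096);     /* new: direct call */
    (void)p;
    (void)q;
    return 0;
}

Since only PAC and HPA ever implemented the interface and every call site
statically knows which one it holds, the indirection bought nothing; the
direct calls are simpler and can be inlined.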
@@ -154,6 +154,18 @@ bool hpa_shard_init(tsdn_t *tsdn, hpa_shard_t *shard, hpa_central_t *central,
     emap_t *emap, base_t *base, edata_cache_t *edata_cache, unsigned ind,
     const hpa_shard_opts_t *opts, const sec_opts_t *sec_opts);
 
+edata_t *hpa_alloc(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
+bool hpa_expand(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata,
+    size_t old_size, size_t new_size, bool zero,
+    bool *deferred_work_generated);
+bool hpa_shrink(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata,
+    size_t old_size, size_t new_size, bool *deferred_work_generated);
+void hpa_dalloc(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata,
+    bool *deferred_work_generated);
+uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);
+
 void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
 void hpa_shard_stats_merge(
     tsdn_t *tsdn, hpa_shard_t *shard, hpa_shard_stats_t *dst);
@@ -96,7 +96,7 @@ struct pa_shard_s {
     /* Allocates from a PAC. */
     pac_t pac;
 
-    hpa_shard_t hpa_shard;
+    hpa_shard_t hpa;
 
     /* The source of edata_t objects. */
     edata_cache_t edata_cache;
@@ -165,6 +165,17 @@ bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
     ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
     malloc_mutex_t *stats_mtx);
 
+edata_t *pac_alloc(tsdn_t *tsdn, pac_t *pac, size_t size, size_t alignment,
+    bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
+bool pac_expand(tsdn_t *tsdn, pac_t *pac, edata_t *edata, size_t old_size,
+    size_t new_size, bool zero, bool *deferred_work_generated);
+bool pac_shrink(tsdn_t *tsdn, pac_t *pac, edata_t *edata, size_t old_size,
+    size_t new_size, bool *deferred_work_generated);
+void pac_dalloc(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
+    bool *deferred_work_generated);
+uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pac_t *pac);
+
 static inline size_t
 pac_mapped(const pac_t *pac) {
     return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
src/hpa.c (74 changed lines)
@@ -8,18 +8,7 @@
 #include "jemalloc/internal/witness.h"
 #include "jemalloc/internal/jemalloc_probe.h"
 
-static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
-    bool *deferred_work_generated);
-static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
-    size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
-static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
-    size_t old_size, size_t new_size, bool *deferred_work_generated);
-static void hpa_dalloc(
-    tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated);
-static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
-
-static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self,
+static void hpa_dalloc_batch(tsdn_t *tsdn, hpa_shard_t *shard,
     edata_list_active_t *list, bool *deferred_work_generated);
 
 const char *const hpa_hugify_style_names[] = {"auto", "none", "eager", "lazy"};
@@ -110,17 +99,6 @@ hpa_shard_init(tsdn_t *tsdn, hpa_shard_t *shard, hpa_central_t *central,
     shard->stats.nhugify_failures = 0;
     shard->stats.ndehugifies = 0;
 
-    /*
-     * Fill these in last, so that if an hpa_shard gets used despite
-     * initialization failing, we'll at least crash instead of just
-     * operating on corrupted data.
-     */
-    shard->pai.alloc = &hpa_alloc;
-    shard->pai.expand = &hpa_expand;
-    shard->pai.shrink = &hpa_shrink;
-    shard->pai.dalloc = &hpa_dalloc;
-    shard->pai.time_until_deferred_work = &hpa_time_until_deferred_work;
-
     err = sec_init(tsdn, &shard->sec, base, sec_opts);
     if (err) {
         return true;
@@ -820,15 +798,6 @@ hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
     return nsuccess;
 }
 
-static hpa_shard_t *
-hpa_from_pai(pai_t *self) {
-    assert(self->alloc == &hpa_alloc);
-    assert(self->expand == &hpa_expand);
-    assert(self->shrink == &hpa_shrink);
-    assert(self->dalloc == &hpa_dalloc);
-    return (hpa_shard_t *)self;
-}
-
 static void
 hpa_assert_results(
     tsdn_t *tsdn, hpa_shard_t *shard, edata_list_active_t *results) {
@@ -854,9 +823,10 @@ hpa_assert_results(
     }
 }
 
-static edata_t *
-hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
+edata_t *
+hpa_alloc(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, size_t alignment,
+    bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated) {
     assert((size & PAGE_MASK) == 0);
     assert(!guarded);
     witness_assert_depth_to_rank(
@@ -866,7 +836,6 @@ hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
     if (alignment > PAGE || zero) {
         return NULL;
     }
-    hpa_shard_t *shard = hpa_from_pai(self);
 
     /*
      * frequent_use here indicates this request comes from the arena bins,
@@ -907,7 +876,7 @@ hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
         /* Unlikely rollback in case of overfill */
         if (!edata_list_active_empty(&results)) {
             hpa_dalloc_batch(
-                tsdn, self, &results, deferred_work_generated);
+                tsdn, shard, &results, deferred_work_generated);
         }
     }
     witness_assert_depth_to_rank(
@@ -915,15 +884,15 @@ hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
     return edata;
 }
 
-static bool
-hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+bool
+hpa_expand(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata, size_t old_size,
     size_t new_size, bool zero, bool *deferred_work_generated) {
     /* Expand not yet supported. */
     return true;
 }
 
-static bool
-hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+bool
+hpa_shrink(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata, size_t old_size,
     size_t new_size, bool *deferred_work_generated) {
     /* Shrink not yet supported. */
     return true;
@@ -982,10 +951,8 @@ hpa_dalloc_locked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
 }
 
 static void
-hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
+hpa_dalloc_batch(tsdn_t *tsdn, hpa_shard_t *shard, edata_list_active_t *list,
     bool *deferred_work_generated) {
-    hpa_shard_t *shard = hpa_from_pai(self);
-
     edata_t *edata;
     ql_foreach (edata, &list->head, ql_link_active) {
         hpa_dalloc_prepare_unlocked(tsdn, shard, edata);
@@ -1003,16 +970,15 @@ hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
     malloc_mutex_unlock(tsdn, &shard->mtx);
 }
 
-static void
-hpa_dalloc(
-    tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) {
+void
+hpa_dalloc(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata,
+    bool *deferred_work_generated) {
     assert(!edata_guarded_get(edata));
 
     edata_list_active_t dalloc_list;
     edata_list_active_init(&dalloc_list);
     edata_list_active_append(&dalloc_list, edata);
 
-    hpa_shard_t *shard = hpa_from_pai(self);
     sec_dalloc(tsdn, &shard->sec, &dalloc_list);
     if (edata_list_active_empty(&dalloc_list)) {
         /* sec consumed the pointer */
@@ -1020,17 +986,16 @@ hpa_dalloc(
         return;
     }
     /* We may have more than one pointer to flush now */
-    hpa_dalloc_batch(tsdn, self, &dalloc_list, deferred_work_generated);
+    hpa_dalloc_batch(tsdn, shard, &dalloc_list, deferred_work_generated);
 }
 
 /*
  * Calculate time until either purging or hugification ought to happen.
  * Called by background threads.
  */
-static uint64_t
-hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
-    hpa_shard_t *shard = hpa_from_pai(self);
-    uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX;
+uint64_t
+hpa_time_until_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
+    uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX;
 
     malloc_mutex_lock(tsdn, &shard->mtx);
 
@@ -1090,8 +1055,7 @@ hpa_sec_flush_impl(tsdn_t *tsdn, hpa_shard_t *shard) {
 
     sec_flush(tsdn, &shard->sec, &to_flush);
     bool deferred_work_generated;
-    hpa_dalloc_batch(
-        tsdn, (pai_t *)shard, &to_flush, &deferred_work_generated);
+    hpa_dalloc_batch(tsdn, shard, &to_flush, &deferred_work_generated);
 }
 
 void
src/pa.c (60 changed lines)
@@ -67,7 +67,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
 bool
 pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
     const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts) {
-    if (hpa_shard_init(tsdn, &shard->hpa_shard, &shard->central->hpa,
+    if (hpa_shard_init(tsdn, &shard->hpa, &shard->central->hpa,
         shard->emap, shard->base, &shard->edata_cache, shard->ind,
         hpa_opts, hpa_sec_opts)) {
         return true;
@@ -82,7 +82,7 @@ void
 pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard) {
     atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
     if (shard->ever_used_hpa) {
-        hpa_shard_disable(tsdn, &shard->hpa_shard);
+        hpa_shard_disable(tsdn, &shard->hpa);
     }
 }
 
@@ -95,7 +95,7 @@ pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard) {
 void
 pa_shard_flush(tsdn_t *tsdn, pa_shard_t *shard) {
     if (shard->ever_used_hpa) {
-        hpa_shard_flush(tsdn, &shard->hpa_shard);
+        hpa_shard_flush(tsdn, &shard->hpa);
     }
 }
 
@@ -108,16 +108,10 @@ void
 pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) {
     pac_destroy(tsdn, &shard->pac);
     if (shard->ever_used_hpa) {
-        hpa_shard_destroy(tsdn, &shard->hpa_shard);
+        hpa_shard_destroy(tsdn, &shard->hpa);
     }
 }
 
-static pai_t *
-pa_get_pai(pa_shard_t *shard, edata_t *edata) {
-    return (edata_pai_get(edata) == EXTENT_PAI_PAC ? &shard->pac.pai
-        : &shard->hpa_shard.pai);
-}
-
 edata_t *
 pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
     bool slab, szind_t szind, bool zero, bool guarded,
@@ -128,7 +122,7 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 
     edata_t *edata = NULL;
     if (!guarded && pa_shard_uses_hpa(shard)) {
-        edata = pai_alloc(tsdn, &shard->hpa_shard.pai, size, alignment,
+        edata = hpa_alloc(tsdn, &shard->hpa, size, alignment,
             zero, /* guarded */ false, slab, deferred_work_generated);
     }
     /*
@@ -136,7 +130,7 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
      * allocation request.
      */
     if (edata == NULL) {
-        edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
+        edata = pac_alloc(tsdn, &shard->pac, size, alignment, zero,
             guarded, slab, deferred_work_generated);
     }
     if (edata != NULL) {
@@ -164,10 +158,15 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
     }
     size_t expand_amount = new_size - old_size;
 
-    pai_t *pai = pa_get_pai(shard, edata);
-
-    bool error = pai_expand(tsdn, pai, edata, old_size, new_size, zero,
-        deferred_work_generated);
+    /*
+     * HPA expand always fails (it's a stub); skip the call entirely for
+     * HPA-owned extents.
+     */
+    if (edata_pai_get(edata) == EXTENT_PAI_HPA) {
+        return true;
+    }
+    bool error = pac_expand(tsdn, &shard->pac, edata, old_size, new_size,
+        zero, deferred_work_generated);
     if (error) {
         return true;
     }
@@ -189,9 +188,15 @@ pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
     }
     size_t shrink_amount = old_size - new_size;
 
-    pai_t *pai = pa_get_pai(shard, edata);
-    bool error = pai_shrink(
-        tsdn, pai, edata, old_size, new_size, deferred_work_generated);
+    /*
+     * HPA shrink always fails (it's a stub); skip the call entirely for
+     * HPA-owned extents.
+     */
+    if (edata_pai_get(edata) == EXTENT_PAI_HPA) {
+        return true;
+    }
+    bool error = pac_shrink(tsdn, &shard->pac, edata, old_size, new_size,
+        deferred_work_generated);
     if (error) {
         return true;
     }
@@ -216,8 +221,11 @@ pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
     edata_addr_set(edata, edata_base_get(edata));
     edata_szind_set(edata, SC_NSIZES);
     pa_nactive_sub(shard, edata_size_get(edata) >> LG_PAGE);
-    pai_t *pai = pa_get_pai(shard, edata);
-    pai_dalloc(tsdn, pai, edata, deferred_work_generated);
+    if (edata_pai_get(edata) == EXTENT_PAI_HPA) {
+        hpa_dalloc(tsdn, &shard->hpa, edata, deferred_work_generated);
+    } else {
+        pac_dalloc(tsdn, &shard->pac, edata, deferred_work_generated);
+    }
 }
 
 bool
@@ -236,14 +244,14 @@ pa_shard_set_deferral_allowed(
     tsdn_t *tsdn, pa_shard_t *shard, bool deferral_allowed) {
     if (pa_shard_uses_hpa(shard)) {
         hpa_shard_set_deferral_allowed(
-            tsdn, &shard->hpa_shard, deferral_allowed);
+            tsdn, &shard->hpa, deferral_allowed);
     }
 }
 
 void
 pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
     if (pa_shard_uses_hpa(shard)) {
-        hpa_shard_do_deferred_work(tsdn, &shard->hpa_shard);
+        hpa_shard_do_deferred_work(tsdn, &shard->hpa);
     }
 }
 
@@ -254,14 +262,14 @@ pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
  */
 uint64_t
 pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
-    uint64_t time = pai_time_until_deferred_work(tsdn, &shard->pac.pai);
+    uint64_t time = pac_time_until_deferred_work(tsdn, &shard->pac);
     if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
         return time;
     }
 
     if (pa_shard_uses_hpa(shard)) {
-        uint64_t hpa = pai_time_until_deferred_work(
-            tsdn, &shard->hpa_shard.pai);
+        uint64_t hpa = hpa_time_until_deferred_work(
+            tsdn, &shard->hpa);
         if (hpa < time) {
             time = hpa;
         }
@@ -17,7 +17,7 @@ pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
 void
 pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
     if (shard->ever_used_hpa) {
-        hpa_shard_prefork2(tsdn, &shard->hpa_shard);
+        hpa_shard_prefork2(tsdn, &shard->hpa);
     }
 }
 
@@ -25,7 +25,7 @@ void
 pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard) {
     malloc_mutex_prefork(tsdn, &shard->pac.grow_mtx);
     if (shard->ever_used_hpa) {
-        hpa_shard_prefork3(tsdn, &shard->hpa_shard);
+        hpa_shard_prefork3(tsdn, &shard->hpa);
     }
 }
 
@@ -36,7 +36,7 @@ pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard) {
     ecache_prefork(tsdn, &shard->pac.ecache_retained);
     ecache_prefork(tsdn, &shard->pac.ecache_pinned);
     if (shard->ever_used_hpa) {
-        hpa_shard_prefork4(tsdn, &shard->hpa_shard);
+        hpa_shard_prefork4(tsdn, &shard->hpa);
     }
 }
 
@@ -56,7 +56,7 @@ pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
     malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
     malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
     if (shard->ever_used_hpa) {
-        hpa_shard_postfork_parent(tsdn, &shard->hpa_shard);
+        hpa_shard_postfork_parent(tsdn, &shard->hpa);
     }
 }
 
@@ -71,7 +71,7 @@ pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
     malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
     malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
     if (shard->ever_used_hpa) {
-        hpa_shard_postfork_child(tsdn, &shard->hpa_shard);
+        hpa_shard_postfork_child(tsdn, &shard->hpa);
     }
 }
 
@@ -84,7 +84,7 @@ size_t
 pa_shard_ndirty(const pa_shard_t *shard) {
     size_t ndirty = ecache_npages_get(&shard->pac.ecache_dirty);
     if (shard->ever_used_hpa) {
-        ndirty += psset_ndirty(&shard->hpa_shard.psset);
+        ndirty += psset_ndirty(&shard->hpa.psset);
     }
     return ndirty;
 }
@@ -177,7 +177,7 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
     }
 
     if (shard->ever_used_hpa) {
-        hpa_shard_stats_merge(tsdn, &shard->hpa_shard, hpa_stats_out);
+        hpa_shard_stats_merge(tsdn, &shard->hpa, hpa_stats_out);
     }
 }
 
@@ -209,11 +209,11 @@ pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
 
     if (shard->ever_used_hpa) {
         pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
-            &shard->hpa_shard.mtx, arena_prof_mutex_hpa_shard);
+            &shard->hpa.mtx, arena_prof_mutex_hpa_shard);
         pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
-            &shard->hpa_shard.grow_mtx,
+            &shard->hpa.grow_mtx,
             arena_prof_mutex_hpa_shard_grow);
-        sec_mutex_stats_read(tsdn, &shard->hpa_shard.sec,
+        sec_mutex_stats_read(tsdn, &shard->hpa.sec,
             &mutex_prof_data[arena_prof_mutex_hpa_sec]);
     }
 }
src/pac.c (50 changed lines)
@@ -4,17 +4,6 @@
 #include "jemalloc/internal/pac.h"
 #include "jemalloc/internal/san.h"
 
-static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
-    bool *deferred_work_generated);
-static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
-    size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
-static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
-    size_t old_size, size_t new_size, bool *deferred_work_generated);
-static void pac_dalloc_impl(
-    tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated);
-static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
-
 static inline void
 pac_decay_data_get(pac_t *pac, extent_state_t state, decay_t **r_decay,
     pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) {
@@ -103,12 +92,6 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
     pac->stats_mtx = stats_mtx;
     atomic_store_zu(&pac->extent_sn_next, 0, ATOMIC_RELAXED);
 
-    pac->pai.alloc = &pac_alloc_impl;
-    pac->pai.expand = &pac_expand_impl;
-    pac->pai.shrink = &pac_shrink_impl;
-    pac->pai.dalloc = &pac_dalloc_impl;
-    pac->pai.time_until_deferred_work = &pac_time_until_deferred_work;
-
     return false;
 }
 
@@ -265,11 +248,10 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
     return edata;
 }
 
-static edata_t *
-pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
+edata_t *
+pac_alloc(tsdn_t *tsdn, pac_t *pac, size_t size, size_t alignment,
     bool zero, bool guarded, bool frequent_reuse,
     bool *deferred_work_generated) {
-    pac_t *pac = (pac_t *)self;
     ehooks_t *ehooks = pac_ehooks_get(pac);
 
     edata_t *edata = NULL;
@@ -292,10 +274,11 @@ pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
     return edata;
 }
 
-static bool
-pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+bool
+pac_expand(tsdn_t *tsdn, pac_t *pac, edata_t *edata, size_t old_size,
     size_t new_size, bool zero, bool *deferred_work_generated) {
-    pac_t *pac = (pac_t *)self;
+    assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
+
     ehooks_t *ehooks = pac_ehooks_get(pac);
 
     size_t mapped_add = 0;
@@ -360,10 +343,11 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
     return false;
 }
 
-static bool
-pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+bool
+pac_shrink(tsdn_t *tsdn, pac_t *pac, edata_t *edata, size_t old_size,
     size_t new_size, bool *deferred_work_generated) {
-    pac_t *pac = (pac_t *)self;
+    assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
+
     ehooks_t *ehooks = pac_ehooks_get(pac);
 
     size_t shrink_amount = old_size - new_size;
@@ -385,10 +369,11 @@ pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
     return false;
 }
 
-static void
-pac_dalloc_impl(
-    tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) {
-    pac_t *pac = (pac_t *)self;
+void
+pac_dalloc(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
+    bool *deferred_work_generated) {
+    assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
+
     ehooks_t *ehooks = pac_ehooks_get(pac);
 
     if (edata_guarded_get(edata)) {
@@ -432,10 +417,9 @@ pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) {
     return result;
 }
 
-static uint64_t
-pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
+uint64_t
+pac_time_until_deferred_work(tsdn_t *tsdn, pac_t *pac) {
     uint64_t time;
-    pac_t *pac = (pac_t *)self;
 
     time = pac_ns_until_purge(
         tsdn, &pac->decay_dirty, ecache_npages_get(&pac->ecache_dirty));
@@ -364,7 +364,7 @@ collect_hpa_stats(int shard_id, hpa_shard_stats_t *hpa_stats_out) {
 
     /* Merge HPA statistics from the shard */
     hpa_shard_stats_merge(
-        tsdn, &g_shard_infra[shard_id].pa_shard.hpa_shard, hpa_stats_out);
+        tsdn, &g_shard_infra[shard_id].pa_shard.hpa, hpa_stats_out);
 }
 
 static void
test/unit/hpa.c (140 changed lines)
@@ -142,23 +142,23 @@ TEST_BEGIN(test_alloc_max) {
 
     /* Small max */
     bool deferred_work_generated = false;
-    edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
+    edata = hpa_alloc(tsdn, shard, ALLOC_MAX, PAGE, false, false,
         /* frequent_reuse */ false, &deferred_work_generated);
     expect_ptr_not_null(edata, "Allocation of small max failed");
 
-    edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
+    edata = hpa_alloc(tsdn, shard, ALLOC_MAX + PAGE, PAGE, false,
         false, /* frequent_reuse */ false, &deferred_work_generated);
     expect_ptr_null(edata, "Allocation of larger than small max succeeded");
 
-    edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
+    edata = hpa_alloc(tsdn, shard, ALLOC_MAX, PAGE, false, false,
         /* frequent_reuse */ true, &deferred_work_generated);
     expect_ptr_not_null(edata, "Allocation of frequent reused failed");
 
-    edata = pai_alloc(tsdn, &shard->pai, HUGEPAGE, PAGE, false, false,
+    edata = hpa_alloc(tsdn, shard, HUGEPAGE, PAGE, false, false,
         /* frequent_reuse */ true, &deferred_work_generated);
     expect_ptr_not_null(edata, "Allocation of frequent reused failed");
 
-    edata = pai_alloc(tsdn, &shard->pai, HUGEPAGE + PAGE, PAGE, false,
+    edata = hpa_alloc(tsdn, shard, HUGEPAGE + PAGE, PAGE, false,
         false, /* frequent_reuse */ true, &deferred_work_generated);
     expect_ptr_null(edata, "Allocation of larger than hugepage succeeded");
 
@@ -262,7 +262,7 @@ TEST_BEGIN(test_stress) {
             size_t npages = npages_min
                 + prng_range_zu(
                     &prng_state, npages_max - npages_min);
-            edata_t *edata = pai_alloc(tsdn, &shard->pai,
+            edata_t *edata = hpa_alloc(tsdn, shard,
                 npages * PAGE, PAGE, false, false, false,
                 &deferred_work_generated);
             assert_ptr_not_null(
@@ -281,7 +281,7 @@ TEST_BEGIN(test_stress) {
             live_edatas[victim] = live_edatas[nlive_edatas - 1];
             nlive_edatas--;
             node_remove(&tree, to_free);
-            pai_dalloc(tsdn, &shard->pai, to_free,
+            hpa_dalloc(tsdn, shard, to_free,
                 &deferred_work_generated);
         }
     }
@@ -301,8 +301,7 @@ TEST_BEGIN(test_stress) {
     for (size_t i = 0; i < nlive_edatas; i++) {
         edata_t *to_free = live_edatas[i];
         node_remove(&tree, to_free);
-        pai_dalloc(
-            tsdn, &shard->pai, to_free, &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, to_free, &deferred_work_generated);
     }
     hpa_shard_destroy(tsdn, shard);
 
@@ -392,7 +391,7 @@ TEST_BEGIN(test_defer_time) {
     tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
     edata_t *edatas[HUGEPAGE_PAGES];
     for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -408,8 +407,7 @@ TEST_BEGIN(test_defer_time) {
 
     /* Purge. Recall that dirty_mult is .25. */
     for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
 
     hpa_shard_do_deferred_work(tsdn, shard);
@@ -426,7 +424,7 @@ TEST_BEGIN(test_defer_time) {
      * be marked for pending hugify.
      */
     for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -434,7 +432,7 @@ TEST_BEGIN(test_defer_time) {
     * We would be ineligible for hugification, had we not already met the
     * threshold before dipping below it.
     */
-    pai_dalloc(tsdn, &shard->pai, edatas[0], &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edatas[0], &deferred_work_generated);
     /* Wait for the threshold again. */
     nstime_init2(&defer_curtime, 22, 0);
     hpa_shard_do_deferred_work(tsdn, shard);
@@ -465,7 +463,7 @@ TEST_BEGIN(test_purge_no_infinite_loop) {
     const size_t size = npages * PAGE;
 
     bool deferred_work_generated = false;
-    edata_t *edata = pai_alloc(tsdn, &shard->pai, size, PAGE,
+    edata_t *edata = hpa_alloc(tsdn, shard, size, PAGE,
         /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
         &deferred_work_generated);
     expect_ptr_not_null(edata, "Unexpected alloc failure");
@@ -502,10 +500,10 @@ TEST_BEGIN(test_no_min_purge_interval) {
     nstime_init(&defer_curtime, 0);
     tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
 
-    edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false,
+    edata_t *edata = hpa_alloc(tsdn, shard, PAGE, PAGE, false, false,
         false, &deferred_work_generated);
     expect_ptr_not_null(edata, "Unexpected null edata");
-    pai_dalloc(tsdn, &shard->pai, edata, &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edata, &deferred_work_generated);
     hpa_shard_do_deferred_work(tsdn, shard);
 
     /*
@@ -544,10 +542,10 @@ TEST_BEGIN(test_min_purge_interval) {
     nstime_init(&defer_curtime, 0);
     tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
 
-    edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false,
+    edata_t *edata = hpa_alloc(tsdn, shard, PAGE, PAGE, false, false,
         false, &deferred_work_generated);
    expect_ptr_not_null(edata, "Unexpected null edata");
-    pai_dalloc(tsdn, &shard->pai, edata, &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edata, &deferred_work_generated);
     hpa_shard_do_deferred_work(tsdn, shard);
 
     /*
@@ -597,14 +595,13 @@ TEST_BEGIN(test_purge) {
     enum { NALLOCS = 8 * HUGEPAGE_PAGES };
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
            false, false, &deferred_work_generated);
        expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     /* Deallocate 3 hugepages out of 8. */
     for (int i = 0; i < 3 * (int)HUGEPAGE_PAGES; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     nstime_init2(&defer_curtime, 6, 0);
     hpa_shard_do_deferred_work(tsdn, shard);
@@ -665,14 +662,13 @@ TEST_BEGIN(test_experimental_max_purge_nhp) {
     enum { NALLOCS = 8 * HUGEPAGE_PAGES };
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     /* Deallocate 3 hugepages out of 8. */
     for (int i = 0; i < 3 * (int)HUGEPAGE_PAGES; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     nstime_init2(&defer_curtime, 6, 0);
     hpa_shard_do_deferred_work(tsdn, shard);
@@ -732,10 +728,10 @@ TEST_BEGIN(test_vectorized_opt_eq_zero) {
     bool deferred_work_generated = false;
     nstime_init(&defer_curtime, 0);
     tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
-    edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false,
+    edata_t *edata = hpa_alloc(tsdn, shard, PAGE, PAGE, false, false,
         false, &deferred_work_generated);
     expect_ptr_not_null(edata, "Unexpected null edata");
-    pai_dalloc(tsdn, &shard->pai, edata, &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edata, &deferred_work_generated);
     hpa_shard_do_deferred_work(tsdn, shard);
 
     expect_false(defer_vectorized_purge_called, "No vec purge");
@@ -775,15 +771,14 @@ TEST_BEGIN(test_starts_huge) {
     enum { NALLOCS = 2 * HUGEPAGE_PAGES };
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     /* Deallocate 75% */
     int pages_to_deallocate = (int)(0.75 * NALLOCS);
     for (int i = 0; i < pages_to_deallocate; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
 
     /*
@@ -818,7 +813,7 @@ TEST_BEGIN(test_starts_huge) {
      */
     deferred_work_generated = false;
     const size_t HALF = HUGEPAGE_PAGES / 2;
-    edatas[1] = pai_alloc(tsdn, &shard->pai, PAGE * (HALF + 1), PAGE, false,
+    edatas[1] = hpa_alloc(tsdn, shard, PAGE * (HALF + 1), PAGE, false,
         false, false, &deferred_work_generated);
     expect_ptr_not_null(edatas[1], "Unexpected null edata");
     expect_false(deferred_work_generated, "No page is purgable");
@@ -839,7 +834,7 @@ TEST_BEGIN(test_starts_huge) {
     expect_zu_eq(stat->merged.nactive, HALF + (HALF + 1), "1st + 2nd");
 
     nstime_iadd(&defer_curtime, opts.min_purge_delay_ms * 1000 * 1000);
-    pai_dalloc(tsdn, &shard->pai, edatas[1], &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edatas[1], &deferred_work_generated);
     expect_true(deferred_work_generated, "");
     expect_zu_eq(stat->merged.ndirty, 3 * HALF, "1st + 2nd");
 
@@ -856,12 +851,11 @@ TEST_BEGIN(test_starts_huge) {
 
     /* Deallocate all the rest, but leave only two active */
     for (int i = pages_to_deallocate; i < NALLOCS - 2; ++i) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
 
     /*
-     * With prior pai_dalloc our last page becomes purgable, however we
+     * With prior hpa_dalloc our last page becomes purgable, however we
      * still want to respect the delay. Thus, it is not time to purge yet.
     */
     hpa_shard_do_deferred_work(tsdn, shard);
@@ -914,14 +908,13 @@ TEST_BEGIN(test_start_huge_purge_empty_only) {
     enum { NALLOCS = 2 * HUGEPAGE_PAGES };
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     /* Deallocate all from the first and one PAGE from the second HP. */
     for (int i = 0; i < NALLOCS / 2 + 1; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     hpa_shard_do_deferred_work(tsdn, shard);
     expect_true(deferred_work_generated, "");
@@ -936,9 +929,9 @@ TEST_BEGIN(test_start_huge_purge_empty_only) {
     expect_zu_eq(0, ndefer_purge_calls, "Should not purge anything");
 
     /* Allocate and free 2*PAGE so that it spills into second page again */
-    edatas[0] = pai_alloc(tsdn, &shard->pai, 2 * PAGE, PAGE, false, false,
+    edatas[0] = hpa_alloc(tsdn, shard, 2 * PAGE, PAGE, false, false,
         false, &deferred_work_generated);
-    pai_dalloc(tsdn, &shard->pai, edatas[0], &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edatas[0], &deferred_work_generated);
     expect_true(deferred_work_generated, "");
     hpa_shard_do_deferred_work(tsdn, shard);
     expect_zu_eq(1, ndefer_purge_calls, "Should purge, delay==0ms");
@@ -980,14 +973,13 @@ TEST_BEGIN(test_assume_huge_purge_fully) {
     enum { NALLOCS = HUGEPAGE_PAGES };
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     /* Deallocate all */
     for (int i = 0; i < NALLOCS; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     hpa_shard_do_deferred_work(tsdn, shard);
     expect_true(deferred_work_generated, "");
@@ -998,12 +990,12 @@ TEST_BEGIN(test_assume_huge_purge_fully) {
     expect_zu_eq(
         shard->psset.stats.empty_slabs[0].npageslabs, 1, "Non huge");
     npurge_size = 0;
-    edatas[0] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false,
+    edatas[0] = hpa_alloc(tsdn, shard, PAGE, PAGE, false, false,
         false, &deferred_work_generated);
     expect_ptr_not_null(edatas[0], "Unexpected null edata");
     expect_zu_eq(shard->psset.stats.merged.nactive, 1, "");
     expect_zu_eq(shard->psset.stats.slabs[1].npageslabs, 1, "Huge nonfull");
-    pai_dalloc(tsdn, &shard->pai, edatas[0], &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edatas[0], &deferred_work_generated);
     expect_true(deferred_work_generated, "");
     ndefer_purge_calls = 0;
     npurge_size = 0;
@@ -1013,14 +1005,13 @@ TEST_BEGIN(test_assume_huge_purge_fully) {
 
     /* Now allocate all, free 10%, alloc 5%, assert non-huge */
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     int ten_pct = NALLOCS / 10;
     for (int i = 0; i < ten_pct; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     ndefer_purge_calls = 0;
     npurge_size = 0;
@@ -1030,7 +1021,7 @@ TEST_BEGIN(test_assume_huge_purge_fully) {
         ten_pct * PAGE, npurge_size, "Should purge 10 percent of pages");
 
     for (int i = 0; i < ten_pct / 2; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -1073,21 +1064,19 @@ TEST_BEGIN(test_eager_with_purge_threshold) {
     enum { NALLOCS = HUGEPAGE_PAGES };
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     /* Deallocate less then threshold PAGEs. */
     for (size_t i = 0; i < THRESHOLD - 1; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     hpa_shard_do_deferred_work(tsdn, shard);
     expect_false(deferred_work_generated, "No page is purgable");
     expect_zu_eq(0, ndefer_purge_calls, "Should not purge yet");
     /* Deallocate one more page to meet the threshold */
-    pai_dalloc(
-        tsdn, &shard->pai, edatas[THRESHOLD - 1], &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edatas[THRESHOLD - 1], &deferred_work_generated);
     hpa_shard_do_deferred_work(tsdn, shard);
     expect_zu_eq(1, ndefer_purge_calls, "Should purge");
     expect_zu_eq(shard->psset.stats.merged.ndirty, 0, "");
@@ -1126,14 +1115,13 @@ TEST_BEGIN(test_delay_when_not_allowed_deferral) {
     edata_t *edatas[NALLOCS];
     ndefer_purge_calls = 0;
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     /* Deallocate all */
     for (int i = 0; i < NALLOCS; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     /* curtime = 100.0s */
     hpa_shard_do_deferred_work(tsdn, shard);
@@ -1143,19 +1131,17 @@ TEST_BEGIN(test_delay_when_not_allowed_deferral) {
     nstime_iadd(&defer_curtime, DELAY_NS - 1);
     /* This activity will take the curtime=100.1 and reset purgability */
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     /* Dealloc all but 2 pages, purgable delay_ns later*/
     for (int i = 0; i < NALLOCS - 2; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
 
     nstime_iadd(&defer_curtime, DELAY_NS);
-    pai_dalloc(
-        tsdn, &shard->pai, edatas[NALLOCS - 1], &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edatas[NALLOCS - 1], &deferred_work_generated);
     expect_true(ndefer_purge_calls > 0, "Should have purged");
 
     ndefer_purge_calls = 0;
@@ -1197,22 +1183,20 @@ TEST_BEGIN(test_deferred_until_time) {
     edata_t *edatas[NALLOCS];
     ndefer_purge_calls = 0;
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     /* Deallocate 25% */
     for (int i = 0; i < NALLOCS / 4; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     expect_true(deferred_work_generated, "We should hugify and purge");
 
     /* Current time = 300ms, purge_eligible at 300ms + 1000ms */
     nstime_init(&defer_curtime, 300UL * 1000 * 1000);
     for (int i = NALLOCS / 4; i < NALLOCS; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     expect_true(deferred_work_generated, "Purge work generated");
     hpa_shard_do_deferred_work(tsdn, shard);
@@ -1220,14 +1204,14 @@ TEST_BEGIN(test_deferred_until_time) {
 
     /* Current time = 900ms, purge_eligible at 1300ms */
     nstime_init(&defer_curtime, 900UL * 1000 * 1000);
-    uint64_t until_ns = pai_time_until_deferred_work(tsdn, &shard->pai);
+    uint64_t until_ns = hpa_time_until_deferred_work(tsdn, shard);
     expect_u64_eq(until_ns, BACKGROUND_THREAD_DEFERRED_MIN,
         "First pass did not happen");
 
     /* Fake that first pass happened more than min_purge_interval_ago */
     nstime_init(&shard->last_purge, 350UL * 1000 * 1000);
     shard->stats.npurge_passes = 1;
-    until_ns = pai_time_until_deferred_work(tsdn, &shard->pai);
+    until_ns = hpa_time_until_deferred_work(tsdn, shard);
     expect_u64_eq(until_ns, BACKGROUND_THREAD_DEFERRED_MIN,
         "No need to heck anything it is more than interval");
 
@@ -1235,7 +1219,7 @@ TEST_BEGIN(test_deferred_until_time) {
     nstime_init(&defer_curtime, 1000UL * 1000 * 1000);
     /* Next purge expected at 900ms + min_purge_interval = 1400ms */
     uint64_t expected_ms = 1400 - 1000;
-    until_ns = pai_time_until_deferred_work(tsdn, &shard->pai);
+    until_ns = hpa_time_until_deferred_work(tsdn, shard);
     expect_u64_eq(expected_ms, until_ns / (1000 * 1000), "Next in 400ms");
     destroy_test_data(shard);
 }
@@ -1276,7 +1260,7 @@ TEST_BEGIN(test_eager_no_hugify_on_threshold) {
     edata_t *edatas[NALLOCS];
     ndefer_purge_calls = 0;
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -1288,8 +1272,7 @@ TEST_BEGIN(test_eager_no_hugify_on_threshold) {
 
     /* Deallocate 25% */
     for (int i = 0; i < NALLOCS / 4; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     expect_true(deferred_work_generated, "purge is needed");
     ndefer_purge_calls = 0;
@@ -1301,7 +1284,7 @@ TEST_BEGIN(test_eager_no_hugify_on_threshold) {
     ndefer_purge_calls = 0;
     nstime_iadd(&defer_curtime, 800UL * 1000 * 1000);
     for (int i = 0; i < NALLOCS / 4 - 1; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -1345,7 +1328,7 @@ TEST_BEGIN(test_hpa_hugify_style_none_huge_no_syscall) {
     edata_t *edatas[NALLOCS];
     ndefer_purge_calls = 0;
     for (int i = 0; i < NALLOCS / 2; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -1403,7 +1386,7 @@ TEST_BEGIN(test_experimental_hpa_enforce_hugify) {
     enum { NALLOCS = HUGEPAGE_PAGES * 95 / 100 };
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -1418,8 +1401,7 @@ TEST_BEGIN(test_experimental_hpa_enforce_hugify) {
 
     /* Deallocate half to trigger purge */
     for (int i = 0; i < NALLOCS / 2; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
 
     hpa_shard_do_deferred_work(tsdn, shard);
@@ -1431,7 +1413,7 @@ TEST_BEGIN(test_experimental_hpa_enforce_hugify) {
         "Should have triggered dehugify syscall with eager style");
 
     for (int i = 0; i < NALLOCS / 2; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -20,7 +20,7 @@ TEST_BEGIN(test_hpa_background_thread_a0_initialized) {
 
     arena_t *a0 = arena_get(TSDN_NULL, 0, false);
     expect_ptr_ne(a0, NULL, "");
-    bool deferral_allowed = a0->pa_shard.hpa_shard.opts.deferral_allowed;
+    bool deferral_allowed = a0->pa_shard.hpa.opts.deferral_allowed;
     expect_true(deferral_allowed,
         "Should have deferral_allowed option enabled for arena #0");
 }
@@ -168,7 +168,7 @@ TEST_BEGIN(test_hpa_sec) {
     tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
 
     /* alloc 1 PAGE, confirm sec has fill_extra bytes. */
-    edata_t *edata1 = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false,
+    edata_t *edata1 = hpa_alloc(tsdn, shard, PAGE, PAGE, false, false,
         false, &deferred_work_generated);
     expect_ptr_not_null(edata1, "Unexpected null edata");
     hpa_shard_stats_t hpa_stats;
@@ -182,7 +182,7 @@ TEST_BEGIN(test_hpa_sec) {
     /* Alloc/dealloc NALLOCS times and confirm extents are in sec. */
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -192,8 +192,7 @@ TEST_BEGIN(test_hpa_sec) {
     expect_zu_eq(hpa_stats.secstats.bytes, PAGE, "2 refills (at 0 and 4)");
 
     for (int i = 0; i < NALLOCS - 1; i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
     }
     memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
     hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
@@ -202,8 +201,7 @@ TEST_BEGIN(test_hpa_sec) {
         hpa_stats.secstats.bytes, sec_opts.max_bytes, "sec should be full");
 
     /* this one should flush 1 + 0.25 * 8 = 3 extents */
-    pai_dalloc(
-        tsdn, &shard->pai, edatas[NALLOCS - 1], &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edatas[NALLOCS - 1], &deferred_work_generated);
     memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
     hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
     expect_zu_eq(hpa_stats.psset_stats.merged.nactive, (NALLOCS - 1), "");
@@ -212,7 +210,7 @@ TEST_BEGIN(test_hpa_sec) {
         "sec should be full");
 
     /* Next allocation should come from SEC and not increase active */
-    edata_t *edata2 = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false,
+    edata_t *edata2 = hpa_alloc(tsdn, shard, PAGE, PAGE, false, false,
         false, &deferred_work_generated);
     expect_ptr_not_null(edata2, "Unexpected null edata");
     memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
@@ -222,7 +220,7 @@ TEST_BEGIN(test_hpa_sec) {
         "sec should have max_bytes minus one page that just came from it");
 
     /* We return this one and it stays in the cache */
-    pai_dalloc(tsdn, &shard->pai, edata2, &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edata2, &deferred_work_generated);
     memset(&hpa_stats, 0, sizeof(hpa_shard_stats_t));
     hpa_shard_stats_merge(tsdn, shard, &hpa_stats);
     expect_zu_eq(hpa_stats.psset_stats.merged.nactive, NALLOCS - 1, "");
@@ -177,7 +177,7 @@ TEST_BEGIN(test_hpa_hugify_style_none_huge_no_syscall_thp_always) {
     edata_t *edatas[NALLOCS];
     ndefer_purge_calls = 0;
     for (int i = 0; i < NALLOCS / 2; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -174,10 +174,10 @@ TEST_BEGIN(test_vectorized_failure_fallback) {
     nstime_init(&defer_curtime, 0);
     tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
 
-    edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false,
+    edata_t *edata = hpa_alloc(tsdn, shard, PAGE, PAGE, false, false,
         false, &deferred_work_generated);
     expect_ptr_not_null(edata, "Unexpected null edata");
-    pai_dalloc(tsdn, &shard->pai, edata, &deferred_work_generated);
+    hpa_dalloc(tsdn, shard, edata, &deferred_work_generated);
     hpa_shard_do_deferred_work(tsdn, shard);
 
     expect_true(defer_vec_purge_didfail, "Expect vec purge fail");
@@ -218,7 +218,7 @@ TEST_BEGIN(test_more_regions_purged_from_one_page) {
     enum { NALLOCS = 8 * HUGEPAGE_PAGES };
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -228,7 +228,7 @@ TEST_BEGIN(test_more_regions_purged_from_one_page) {
     for (int i = 0; i < 3 * (int)HUGEPAGE_PAGES; i++) {
         int j = i % HUGEPAGE_PAGES;
         if (j != 1 && j != 3) {
-            pai_dalloc(tsdn, &shard->pai, edatas[i],
+            hpa_dalloc(tsdn, shard, edatas[i],
                 &deferred_work_generated);
         }
     }
@@ -170,7 +170,7 @@ TEST_BEGIN(test_vectorized_purge) {
     enum { NALLOCS = 8 * HUGEPAGE_PAGES };
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
@@ -180,7 +180,7 @@ TEST_BEGIN(test_vectorized_purge) {
     for (int i = 0; i < 3 * (int)HUGEPAGE_PAGES; i++) {
         int j = i % HUGEPAGE_PAGES;
         if (j != 1 && j != 3) {
-            pai_dalloc(tsdn, &shard->pai, edatas[i],
+            hpa_dalloc(tsdn, shard, edatas[i],
                 &deferred_work_generated);
         }
     }
@@ -232,14 +232,14 @@ TEST_BEGIN(test_purge_more_than_one_batch_pages) {
     enum { NALLOCS = HPA_PURGE_BATCH_MAX * 3 * HUGEPAGE_PAGES };
     edata_t *edatas[NALLOCS];
     for (int i = 0; i < NALLOCS; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+        edatas[i] = hpa_alloc(tsdn, shard, PAGE, PAGE, false,
             false, false, &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     for (int i = 0; i < HPA_PURGE_BATCH_MAX * 2 * (int)HUGEPAGE_PAGES;
         i++) {
-        pai_dalloc(
-            tsdn, &shard->pai, edatas[i], &deferred_work_generated);
+        hpa_dalloc(tsdn, shard, edatas[i],
+            &deferred_work_generated);
     }
 
     hpa_shard_do_deferred_work(tsdn, shard);