mirror of
https://github.com/jemalloc/jemalloc.git
synced 2026-05-14 00:46:21 +03:00
Replace PAI vtable dispatch with direct calls
The pai_t interface implements C-style polymorphism via function pointers to abstract over PAC and HPA. This abstraction provides no real benefit: only two implementations exist, the dispatcher already knows which one to use, and HPA stubs 2 of 5 operations. Remove the runtime dispatch in favor of direct calls. This commit: - Promotes pac_alloc/expand/shrink/dalloc/time_until_deferred_work to external linkage and replaces the pai_t *self parameter with pac_t *pac. - Promotes hpa_alloc/expand/shrink/dalloc/time_until_deferred_work to external linkage and replaces pai_t *self with hpa_shard_t *shard. - Updates hpa_dalloc_batch's signature to take hpa_shard_t * directly and removes the hpa_from_pai container-of helper. Updates internal callers in hpa_alloc, hpa_dalloc, and hpa_sec_flush_impl. - Drops the vtable assignments from pac_init() and hpa_shard_init(). - Replaces pai_alloc/dalloc/etc. dispatch in pa.c with direct calls. HPA expand and shrink (which are unconditional failure stubs) are skipped entirely for HPA-owned extents. - Removes the pa_get_pai() helper. - Updates tests in test/unit/hpa.c and test/unit/hpa_sec_integration.c to call hpa_alloc/dalloc/etc. directly. The pai_t struct field stays as dead weight in pac_t and hpa_shard_t; it is removed in the next commit along with pai.h itself. No behavioral changes.
This commit is contained in:
parent
163c871d6c
commit
1dfa6f7aa4
14 changed files with 183 additions and 224 deletions
|
|
@@ -154,6 +154,18 @@ bool hpa_shard_init(tsdn_t *tsdn, hpa_shard_t *shard, hpa_central_t *central,
|
|||
emap_t *emap, base_t *base, edata_cache_t *edata_cache, unsigned ind,
|
||||
const hpa_shard_opts_t *opts, const sec_opts_t *sec_opts);
|
||||
|
||||
edata_t *hpa_alloc(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
|
||||
size_t alignment, bool zero, bool guarded, bool frequent_reuse,
|
||||
bool *deferred_work_generated);
|
||||
bool hpa_expand(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata,
|
||||
size_t old_size, size_t new_size, bool zero,
|
||||
bool *deferred_work_generated);
|
||||
bool hpa_shrink(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata,
|
||||
size_t old_size, size_t new_size, bool *deferred_work_generated);
|
||||
void hpa_dalloc(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata,
|
||||
bool *deferred_work_generated);
|
||||
uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);
|
||||
|
||||
void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
|
||||
void hpa_shard_stats_merge(
|
||||
tsdn_t *tsdn, hpa_shard_t *shard, hpa_shard_stats_t *dst);
|
||||
|
|
|
|||
|
|
@@ -96,7 +96,7 @@ struct pa_shard_s {
|
|||
/* Allocates from a PAC. */
|
||||
pac_t pac;
|
||||
|
||||
hpa_shard_t hpa_shard;
|
||||
hpa_shard_t hpa;
|
||||
|
||||
/* The source of edata_t objects. */
|
||||
edata_cache_t edata_cache;
|
||||
|
|
|
|||
|
|
@@ -165,6 +165,17 @@ bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
|
|||
ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
|
||||
malloc_mutex_t *stats_mtx);
|
||||
|
||||
edata_t *pac_alloc(tsdn_t *tsdn, pac_t *pac, size_t size, size_t alignment,
|
||||
bool zero, bool guarded, bool frequent_reuse,
|
||||
bool *deferred_work_generated);
|
||||
bool pac_expand(tsdn_t *tsdn, pac_t *pac, edata_t *edata, size_t old_size,
|
||||
size_t new_size, bool zero, bool *deferred_work_generated);
|
||||
bool pac_shrink(tsdn_t *tsdn, pac_t *pac, edata_t *edata, size_t old_size,
|
||||
size_t new_size, bool *deferred_work_generated);
|
||||
void pac_dalloc(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
|
||||
bool *deferred_work_generated);
|
||||
uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pac_t *pac);
|
||||
|
||||
static inline size_t
|
||||
pac_mapped(const pac_t *pac) {
|
||||
return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue