Remove dead pai_t field and delete pai.h

After replacing PAI vtable dispatch with direct calls in the previous
commit, the embedded pai_t member in pac_t and hpa_shard_t is dead
weight, and pai.h has no remaining users. Remove them.

Changes:
- Drop pai_t pai member (and "must be first member" comment) from
  pac_t and hpa_shard_t.
- Replace #include "jemalloc/internal/pai.h" with the actually-needed
  edata.h / tsd_types.h in pac.h, hpa.h, sec.h, pa.h.
- Update extent_pai_t comment in edata.h to no longer reference pai.h.
- Update three remaining test files (hpa_thp_always,
  hpa_vectorized_madvise, hpa_vectorized_madvise_large_batch) to call
  hpa_*(tsdn, shard, ...) directly instead of pai_*(tsdn, &shard->pai,
  ...).
- Delete include/jemalloc/internal/pai.h.

No behavioral changes.
This commit is contained in:
guangli-dai 2026-04-20 17:24:21 -07:00 committed by Guangli Dai
parent 1dfa6f7aa4
commit 8edd101286
7 changed files with 10 additions and 86 deletions

View file

@@ -48,8 +48,8 @@ enum extent_head_state_e {
typedef enum extent_head_state_e extent_head_state_t;
/*
* Which implementation of the page allocator interface, (PAI, defined in
* pai.h) owns the given extent?
* Which page allocator implementation (PAC or HPA) owns the given extent?
* Used by PA to route expand/shrink/dalloc to the correct implementation.
*/
enum extent_pai_e { EXTENT_PAI_PAC = 0, EXTENT_PAI_HPA = 1 };
typedef enum extent_pai_e extent_pai_t;

View file

@@ -10,7 +10,6 @@
#include "jemalloc/internal/hpa_hooks.h"
#include "jemalloc/internal/hpa_opts.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/psset.h"
#include "jemalloc/internal/sec.h"
@@ -63,12 +62,6 @@ struct hpa_shard_stats_s {
typedef struct hpa_shard_s hpa_shard_t;
struct hpa_shard_s {
/*
* pai must be the first member; we cast from a pointer to it to a
* pointer to the hpa_shard_t.
*/
pai_t pai;
/* The central allocator we get our hugepages from. */
hpa_central_t *central;

View file

@@ -10,15 +10,13 @@
#include "jemalloc/internal/hpa.h"
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/pac.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/sec.h"
/*
* The page allocator; responsible for acquiring pages of memory for
* allocations. It picks the implementation of the page allocator interface
* (i.e. a pai_t) to handle a given page-level allocation request. For now, the
* only such implementation is the PAC code ("page allocator classic"), but
* others will be coming soon.
* allocations. It dispatches each page-level allocation request to either
* the PAC ("page allocator classic") or the HPA ("hugepage allocator")
* by calling their pac_*() / hpa_*() entry points directly.
*/
typedef struct pa_central_s pa_central_t;

View file

@@ -4,10 +4,11 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/decay.h"
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/tsd_types.h"
#include "san_bump.h"
/*
@@ -87,11 +88,6 @@ struct pac_stats_s {
typedef struct pac_s pac_t;
struct pac_s {
/*
* Must be the first member (we convert it to a PAC given only a
* pointer). The handle to the allocation interface.
*/
pai_t pai;
/* True once pinned memory has been seen. */
atomic_b_t has_pinned;
/*

View file

@@ -1,63 +0,0 @@
#ifndef JEMALLOC_INTERNAL_PAI_H
#define JEMALLOC_INTERNAL_PAI_H
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/tsd_types.h"
/*
 * An interface for page allocation: a vtable through which page-level
 * requests are dispatched to a concrete implementation.  Implementations
 * embed a pai_t and receive it back as `self` on every call, from which
 * they recover their enclosing object.
 */
typedef struct pai_s pai_t;
struct pai_s {
	/*
	 * Allocate an extent of `size` bytes with the given `alignment`.
	 * Returns NULL on failure.
	 * NOTE(review): `deferred_work_generated` looks like an out-param
	 * flagging that background work was created — confirm with callers.
	 */
	edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
	    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
	    bool *deferred_work_generated);
	/*
	 * Grow `edata` in place from `old_size` to `new_size`.
	 * Presumably returns true on failure (jemalloc convention) — confirm.
	 */
	bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
	    size_t old_size, size_t new_size, bool zero,
	    bool *deferred_work_generated);
	/* Shrink `edata` in place from `old_size` to `new_size`. */
	bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
	    size_t old_size, size_t new_size, bool *deferred_work_generated);
	/* Return `edata` to the implementation that owns it. */
	void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
	    bool *deferred_work_generated);
	/*
	 * Time until deferred work should next run; units are not visible
	 * here — presumably nanoseconds, TODO confirm against callers.
	 */
	uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
};
/*
 * Thin wrappers over the vtable so that callers do not have to spell out
 * the same pai_t twice on every invocation.
 */
static inline edata_t *
pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
	return (*self->alloc)(tsdn, self, size, alignment, zero, guarded,
	    frequent_reuse, deferred_work_generated);
}
/* Convenience wrapper: dispatch an in-place expand through the vtable. */
static inline bool
pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
    size_t new_size, bool zero, bool *deferred_work_generated) {
	bool ret = (*self->expand)(tsdn, self, edata, old_size, new_size,
	    zero, deferred_work_generated);
	return ret;
}
/* Convenience wrapper: dispatch an in-place shrink through the vtable. */
static inline bool
pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
    size_t new_size, bool *deferred_work_generated) {
	bool ret = (*self->shrink)(tsdn, self, edata, old_size, new_size,
	    deferred_work_generated);
	return ret;
}
/* Convenience wrapper: dispatch a deallocation through the vtable. */
static inline void
pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    bool *deferred_work_generated) {
	(*self->dalloc)(tsdn, self, edata, deferred_work_generated);
}
/* Convenience wrapper: query the implementation's deferred-work timer. */
static inline uint64_t
pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
	uint64_t ret = (*self->time_until_deferred_work)(tsdn, self);
	return ret;
}
#endif /* JEMALLOC_INTERNAL_PAI_H */

View file

@@ -4,9 +4,10 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/sec_opts.h"
#include "jemalloc/internal/tsd_types.h"
/*
* Small extent cache.

View file

@@ -238,8 +238,7 @@ TEST_BEGIN(test_purge_more_than_one_batch_pages) {
}
for (int i = 0; i < HPA_PURGE_BATCH_MAX * 2 * (int)HUGEPAGE_PAGES;
i++) {
hpa_dalloc(tsdn, shard, edatas[i],
&deferred_work_generated);
hpa_dalloc(tsdn, shard, edatas[i], &deferred_work_generated);
}
hpa_shard_do_deferred_work(tsdn, shard);