mirror of
https://github.com/jemalloc/jemalloc.git
synced 2026-05-17 18:36:21 +03:00
[thread_event] Remove macros from thread_event and replace with dynamic event objects
This commit is contained in:
parent
1972241cd2
commit
153fab2b00
17 changed files with 455 additions and 318 deletions
@@ -2,108 +2,46 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 #include "jemalloc/internal/thread_event.h"
 
-/*
- * Signatures for event specific functions. These functions should be defined
- * by the modules owning each event. The signatures here verify that the
- * definitions follow the right format.
- *
- * The first two are functions computing new / postponed event wait time. New
- * event wait time is the time till the next event if an event is currently
- * being triggered; postponed event wait time is the time till the next event
- * if an event should be triggered but needs to be postponed, e.g. when the TSD
- * is not nominal or during reentrancy.
- *
- * The third is the event handler function, which is called whenever an event
- * is triggered. The parameter is the elapsed time since the last time an
- * event of the same type was triggered.
- */
-#define E(event, condition_unused, is_alloc_event_unused) \
-    uint64_t event##_new_event_wait(tsd_t *tsd); \
-    uint64_t event##_postponed_event_wait(tsd_t *tsd); \
-    void event##_event_handler(tsd_t *tsd, uint64_t elapsed);
-
-ITERATE_OVER_ALL_EVENTS
-#undef E
-
-/* Signatures for internal functions fetching elapsed time. */
-#define E(event, condition_unused, is_alloc_event_unused) \
-    static uint64_t event##_fetch_elapsed(tsd_t *tsd);
-
-ITERATE_OVER_ALL_EVENTS
-#undef E
-
-static uint64_t
-tcache_gc_fetch_elapsed(tsd_t *tsd) {
-    return TE_INVALID_ELAPSED;
-}
-
-static uint64_t
-tcache_gc_dalloc_fetch_elapsed(tsd_t *tsd) {
-    return TE_INVALID_ELAPSED;
-}
-
-static uint64_t
-prof_sample_fetch_elapsed(tsd_t *tsd) {
-    uint64_t last_event = thread_allocated_last_event_get(tsd);
-    uint64_t last_sample_event = prof_sample_last_event_get(tsd);
-    prof_sample_last_event_set(tsd, last_event);
-    return last_event - last_sample_event;
-}
-
-static uint64_t
-stats_interval_fetch_elapsed(tsd_t *tsd) {
-    uint64_t last_event = thread_allocated_last_event_get(tsd);
-    uint64_t last_stats_event = stats_interval_last_event_get(tsd);
-    stats_interval_last_event_set(tsd, last_event);
-    return last_event - last_stats_event;
-}
-
-static uint64_t
-peak_alloc_fetch_elapsed(tsd_t *tsd) {
-    return TE_INVALID_ELAPSED;
-}
-
-static uint64_t
-peak_dalloc_fetch_elapsed(tsd_t *tsd) {
-    return TE_INVALID_ELAPSED;
-}
-
-static uint64_t
-prof_threshold_fetch_elapsed(tsd_t *tsd) {
-    return TE_INVALID_ELAPSED;
-}
-
 /* Per event facilities done. */
+#include "jemalloc/internal/thread_event_registry.h"
+#include "jemalloc/internal/peak_event.h"
 
 static bool
 te_ctx_has_active_events(te_ctx_t *ctx) {
     assert(config_debug);
-#define E(event, condition, alloc_event) \
-    if (condition && alloc_event == ctx->is_alloc) { \
-        return true; \
-    }
-
-    ITERATE_OVER_ALL_EVENTS
-#undef E
+    if (ctx->is_alloc) {
+        for (int i = 0; i < te_alloc_count; ++i) {
+            if (te_alloc_handlers[i]->enabled()) {
+                return true;
+            }
+        }
+    } else {
+        for (int i = 0; i < te_dalloc_count; ++i) {
+            if (te_dalloc_handlers[i]->enabled()) {
+                return true;
+            }
+        }
+    }
     return false;
 }
 
 static uint64_t
 te_next_event_compute(tsd_t *tsd, bool is_alloc) {
+    te_base_cb_t **handlers = is_alloc ?
+        te_alloc_handlers : te_dalloc_handlers;
+    uint64_t *waits = is_alloc ?
+        tsd_te_datap_get_unsafe(tsd)->alloc_wait :
+        tsd_te_datap_get_unsafe(tsd)->dalloc_wait;
+    int count = is_alloc ? te_alloc_count : te_dalloc_count;
+
     uint64_t wait = TE_MAX_START_WAIT;
-#define E(event, condition, alloc_event) \
-    if (is_alloc == alloc_event && condition) { \
-        uint64_t event_wait = \
-            event##_event_wait_get(tsd); \
-        assert(event_wait <= TE_MAX_START_WAIT); \
-        if (event_wait > 0U && event_wait < wait) { \
-            wait = event_wait; \
-        } \
-    }
-
-    ITERATE_OVER_ALL_EVENTS
-#undef E
+    for (int i = 0; i < count; i++) {
+        if (handlers[i]->enabled()) {
+            uint64_t ev_wait = waits[i];
+            assert(ev_wait <= TE_MAX_START_WAIT);
+            if (ev_wait > 0U && ev_wait < wait) {
+                wait = ev_wait;
+            }
+        }
+    }
+
     assert(wait <= TE_MAX_START_WAIT);
     return wait;
 }
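The registry types referenced above (te_base_cb_t, the handler arrays, the index constants, and the per-thread wait arrays reached through tsd_te_datap_get_unsafe()) live in jemalloc/internal/thread_event_registry.h, which this diff does not show. The following is a minimal sketch of what the call sites in this file imply; the member order, the enum ordering, and the te_data_t layout are all assumptions, not the commit's actual definitions:

#include <stdbool.h>
#include <stdint.h>

typedef struct tsd_s tsd_t;    /* stand-in for jemalloc's TSD type */

/* Hypothetical shape of a dynamic event object, inferred from call sites. */
typedef struct te_base_cb_s {
    bool (*enabled)(void);
    /* Wait time until the next event, computed when one triggers. */
    uint64_t (*new_event_wait)(tsd_t *tsd);
    /* Wait time when a due event must be postponed (non-nominal TSD, reentrancy). */
    uint64_t (*postponed_event_wait)(tsd_t *tsd);
    /* Invoked for each event collected into to_trigger; no elapsed parameter. */
    void (*event_handler)(tsd_t *tsd);
} te_base_cb_t;

/* Fixed-order registries, indexed by the te_alloc_* / te_dalloc_* constants. */
enum {
    te_alloc_tcache_gc = 0,
    te_alloc_prof_sample,
    te_alloc_stats_interval,
    te_alloc_peak,
    te_alloc_prof_threshold,
    te_alloc_count
};
enum {
    te_dalloc_tcache_gc = 0,
    te_dalloc_peak,
    te_dalloc_count
};

extern te_base_cb_t *te_alloc_handlers[te_alloc_count];
extern te_base_cb_t *te_dalloc_handlers[te_dalloc_count];

/* Per-thread wait counters, presumably reached via tsd_te_datap_get_unsafe(tsd). */
typedef struct te_data_s {
    uint64_t alloc_wait[te_alloc_count];
    uint64_t dalloc_wait[te_dalloc_count];
} te_data_t;

Note that te_alloc_count >= te_dalloc_count under this ordering, which is consistent with the assert in te_event_trigger() further down.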
@@ -238,18 +176,132 @@ te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx,
     te_ctx_next_event_set(tsd, ctx, next_event);
 }
 
-static uint64_t
-te_clip_event_wait(uint64_t event_wait) {
-    assert(event_wait > 0U);
-    if (TE_MIN_START_WAIT > 1U &&
-        unlikely(event_wait < TE_MIN_START_WAIT)) {
-        event_wait = TE_MIN_START_WAIT;
-    }
-    if (TE_MAX_START_WAIT < UINT64_MAX &&
-        unlikely(event_wait > TE_MAX_START_WAIT)) {
-        event_wait = TE_MAX_START_WAIT;
-    }
-    return event_wait;
-}
+static void
+te_init_waits(tsd_t *tsd, uint64_t *wait, bool is_alloc) {
+    te_base_cb_t **handlers = is_alloc ?
+        te_alloc_handlers : te_dalloc_handlers;
+    uint64_t *waits = is_alloc ?
+        tsd_te_datap_get_unsafe(tsd)->alloc_wait :
+        tsd_te_datap_get_unsafe(tsd)->dalloc_wait;
+    int count = is_alloc ? te_alloc_count : te_dalloc_count;
+    for (int i = 0; i < count; i++) {
+        if (handlers[i]->enabled()) {
+            uint64_t ev_wait = handlers[i]->new_event_wait(tsd);
+            assert(ev_wait > 0);
+            waits[i] = ev_wait;
+            if (ev_wait < *wait) {
+                *wait = ev_wait;
+            }
+        }
+    }
+}
+
+static inline bool
+te_update_wait(tsd_t *tsd, uint64_t accumbytes, bool allow,
+    uint64_t *ev_wait, uint64_t *wait, te_base_cb_t *handler,
+    uint64_t new_wait) {
+    bool ret = false;
+    if (*ev_wait > accumbytes) {
+        *ev_wait -= accumbytes;
+    } else if (!allow) {
+        *ev_wait = handler->postponed_event_wait(tsd);
+    } else {
+        ret = true;
+        *ev_wait = new_wait == 0 ?
+            handler->new_event_wait(tsd) : new_wait;
+    }
+
+    assert(*ev_wait > 0);
+    if (*ev_wait < *wait) {
+        *wait = *ev_wait;
+    }
+    return ret;
+}
+
+extern uint64_t stats_interval_accum_batch;
+
+/* Return the number of handlers enqueued into the to_trigger array. */
+static inline size_t
+te_update_alloc_events(tsd_t *tsd, te_base_cb_t **to_trigger,
+    uint64_t accumbytes, bool allow, uint64_t *wait) {
+    /*
+     * We do not loop and invoke the functions via the interface because
+     * of the perf cost. This path is relatively hot, so we sacrifice
+     * elegance for perf.
+     */
+    size_t nto_trigger = 0;
+    uint64_t *waits = tsd_te_datap_get_unsafe(tsd)->alloc_wait;
+    if (opt_tcache_gc_incr_bytes > 0) {
+        assert(te_alloc_handlers[te_alloc_tcache_gc]->enabled());
+        if (te_update_wait(tsd, accumbytes, allow,
+            &waits[te_alloc_tcache_gc], wait,
+            te_alloc_handlers[te_alloc_tcache_gc],
+            opt_tcache_gc_incr_bytes)) {
+            to_trigger[nto_trigger++] =
+                te_alloc_handlers[te_alloc_tcache_gc];
+        }
+    }
+#ifdef JEMALLOC_PROF
+    if (opt_prof) {
+        assert(te_alloc_handlers[te_alloc_prof_sample]->enabled());
+        if (te_update_wait(tsd, accumbytes, allow,
+            &waits[te_alloc_prof_sample], wait,
+            te_alloc_handlers[te_alloc_prof_sample], 0)) {
+            to_trigger[nto_trigger++] =
+                te_alloc_handlers[te_alloc_prof_sample];
+        }
+    }
+#endif
+    if (opt_stats_interval >= 0) {
+        if (te_update_wait(tsd, accumbytes, allow,
+            &waits[te_alloc_stats_interval], wait,
+            te_alloc_handlers[te_alloc_stats_interval],
+            stats_interval_accum_batch)) {
+            assert(te_alloc_handlers[te_alloc_stats_interval]->enabled());
+            to_trigger[nto_trigger++] =
+                te_alloc_handlers[te_alloc_stats_interval];
+        }
+    }
+
+#ifdef JEMALLOC_STATS
+    assert(te_alloc_handlers[te_alloc_peak]->enabled());
+    if (te_update_wait(tsd, accumbytes, allow, &waits[te_alloc_peak], wait,
+        te_alloc_handlers[te_alloc_peak], PEAK_EVENT_WAIT)) {
+        to_trigger[nto_trigger++] = te_alloc_handlers[te_alloc_peak];
+    }
+
+    assert(te_alloc_handlers[te_alloc_prof_threshold]->enabled());
+    if (te_update_wait(tsd, accumbytes, allow,
+        &waits[te_alloc_prof_threshold], wait,
+        te_alloc_handlers[te_alloc_prof_threshold],
+        1 << opt_experimental_lg_prof_threshold)) {
+        to_trigger[nto_trigger++] =
+            te_alloc_handlers[te_alloc_prof_threshold];
+    }
+#endif
+    return nto_trigger;
+}
+
+static inline size_t
+te_update_dalloc_events(tsd_t *tsd, te_base_cb_t **to_trigger,
+    uint64_t accumbytes, bool allow, uint64_t *wait) {
+    size_t nto_trigger = 0;
+    uint64_t *waits = tsd_te_datap_get_unsafe(tsd)->dalloc_wait;
+    if (opt_tcache_gc_incr_bytes > 0) {
+        assert(te_dalloc_handlers[te_dalloc_tcache_gc]->enabled());
+        if (te_update_wait(tsd, accumbytes, allow,
+            &waits[te_dalloc_tcache_gc], wait,
+            te_dalloc_handlers[te_dalloc_tcache_gc],
+            opt_tcache_gc_incr_bytes)) {
+            to_trigger[nto_trigger++] =
+                te_dalloc_handlers[te_dalloc_tcache_gc];
+        }
+    }
+#ifdef JEMALLOC_STATS
+    assert(te_dalloc_handlers[te_dalloc_peak]->enabled());
+    if (te_update_wait(tsd, accumbytes, allow, &waits[te_dalloc_peak], wait,
+        te_dalloc_handlers[te_dalloc_peak], PEAK_EVENT_WAIT)) {
+        to_trigger[nto_trigger++] = te_dalloc_handlers[te_dalloc_peak];
+    }
+#endif
+    return nto_trigger;
+}
+
 void
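te_update_wait() centralizes the countdown logic that the old E() macro expanded once per event: each wait counter is denominated in bytes, accumbytes is subtracted until the counter would cross zero, and a due event either fires (re-arming to new_wait, or asking the handler when new_wait == 0) or is postponed when triggering is disallowed. A standalone toy that mirrors just that arithmetic; all names and values here are invented for illustration and are not jemalloc's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Re-arm value used when the event fires; stands in for new_event_wait(). */
#define NEW_WAIT 1024

static bool
toy_update_wait(uint64_t *ev_wait, uint64_t accumbytes, bool allow) {
    if (*ev_wait > accumbytes) {
        *ev_wait -= accumbytes;    /* Not due yet: just count down. */
        return false;
    }
    if (!allow) {
        *ev_wait = NEW_WAIT;       /* Postponed: re-arm without firing. */
        return false;
    }
    *ev_wait = NEW_WAIT;           /* Due and allowed: fire, then re-arm. */
    return true;
}

int
main(void) {
    uint64_t wait = 1024;
    /* 600 bytes allocated: counts down to 424, no trigger -> "0 424". */
    printf("%d %llu\n", toy_update_wait(&wait, 600, true),
        (unsigned long long)wait);
    /* Another 600 bytes crosses zero: fires, re-arms -> "1 1024". */
    printf("%d %llu\n", toy_update_wait(&wait, 600, true),
        (unsigned long long)wait);
    return 0;
}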
@@ -263,47 +315,32 @@ te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) {
 
     bool allow_event_trigger = tsd_nominal(tsd) &&
         tsd_reentrancy_level_get(tsd) == 0;
-    bool is_alloc = ctx->is_alloc;
     uint64_t wait = TE_MAX_START_WAIT;
 
-#define E(event, condition, alloc_event) \
-    bool is_##event##_triggered = false; \
-    if (is_alloc == alloc_event && condition) { \
-        uint64_t event_wait = event##_event_wait_get(tsd); \
-        assert(event_wait <= TE_MAX_START_WAIT); \
-        if (event_wait > accumbytes) { \
-            event_wait -= accumbytes; \
-        } else if (!allow_event_trigger) { \
-            event_wait = event##_postponed_event_wait(tsd); \
-        } else { \
-            is_##event##_triggered = true; \
-            event_wait = event##_new_event_wait(tsd); \
-        } \
-        event_wait = te_clip_event_wait(event_wait); \
-        event##_event_wait_set(tsd, event_wait); \
-        if (event_wait < wait) { \
-            wait = event_wait; \
-        } \
-    }
-
-    ITERATE_OVER_ALL_EVENTS
-#undef E
+    assert((int)te_alloc_count >= (int)te_dalloc_count);
+    te_base_cb_t *to_trigger[te_alloc_count];
+    size_t nto_trigger;
+    if (ctx->is_alloc) {
+        nto_trigger = te_update_alloc_events(tsd, to_trigger,
+            accumbytes, allow_event_trigger, &wait);
+    } else {
+        nto_trigger = te_update_dalloc_events(tsd, to_trigger,
+            accumbytes, allow_event_trigger, &wait);
+    }
 
     assert(wait <= TE_MAX_START_WAIT);
     te_adjust_thresholds_helper(tsd, ctx, wait);
     te_assert_invariants(tsd);
 
-#define E(event, condition, alloc_event) \
-    if (is_alloc == alloc_event && condition && \
-        is_##event##_triggered) { \
-        assert(allow_event_trigger); \
-        uint64_t elapsed = event##_fetch_elapsed(tsd); \
-        event##_event_handler(tsd, elapsed); \
-    }
-
-    ITERATE_OVER_ALL_EVENTS
-#undef E
+    for (size_t i = 0; i < nto_trigger; i++) {
+        assert(allow_event_trigger);
+        to_trigger[i]->event_handler(tsd);
+    }
 
     te_assert_invariants(tsd);
 }
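A behavioral consequence of the new trigger loop: the handler callback now takes only tsd, so the elapsed-time plumbing (the *_fetch_elapsed() helpers removed in the first hunk and the old two-argument event##_event_handler()) is gone. An event that still needs the elapsed time presumably recovers it internally. A hypothetical sketch, not part of this commit, that transplants the removed prof_sample_fetch_elapsed() logic into the handler using the same TSD accessors:

/* Hypothetical: mirrors the removed prof_sample_fetch_elapsed(). */
static void
prof_sample_event_handler(tsd_t *tsd) {
    uint64_t last_event = thread_allocated_last_event_get(tsd);
    uint64_t elapsed = last_event - prof_sample_last_event_get(tsd);
    prof_sample_last_event_set(tsd, last_event);
    /* ... proceed as the old handler did with its elapsed argument ... */
    (void)elapsed;
}

Deferring the calls until after te_adjust_thresholds_helper() has re-established the thresholds also means handlers can safely allocate and re-enter the event machinery, matching the old macro-based ordering.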
@@ -323,18 +360,8 @@ te_init(tsd_t *tsd, bool is_alloc) {
     te_ctx_last_event_set(&ctx, te_ctx_current_bytes_get(&ctx));
 
     uint64_t wait = TE_MAX_START_WAIT;
-#define E(event, condition, alloc_event) \
-    if (is_alloc == alloc_event && condition) { \
-        uint64_t event_wait = event##_new_event_wait(tsd); \
-        event_wait = te_clip_event_wait(event_wait); \
-        event##_event_wait_set(tsd, event_wait); \
-        if (event_wait < wait) { \
-            wait = event_wait; \
-        } \
-    }
-
-    ITERATE_OVER_ALL_EVENTS
-#undef E
+    te_init_waits(tsd, &wait, is_alloc);
+
     te_adjust_thresholds_helper(tsd, &ctx, wait);
 }
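Under the new scheme, each event module presumably exports one te_base_cb_t instance wired to the per-event functions the removed E() macro used to declare, with the elapsed parameter dropped from the handler as the trigger loop implies. An illustrative registration for the tcache GC event; te_base_cb_t and the callback names come from this diff, but everything about the wiring itself is an assumption, since the registry header is not shown:

/* Illustrative only; the real wiring lives in thread_event_registry.h. */
static bool
tcache_gc_enabled(void) {
    return opt_tcache_gc_incr_bytes > 0;
}

te_base_cb_t tcache_gc_alloc_cb = {
    .enabled = tcache_gc_enabled,
    .new_event_wait = tcache_gc_new_event_wait,
    .postponed_event_wait = tcache_gc_postponed_event_wait,
    .event_handler = tcache_gc_event_handler,    /* now takes tsd only */
};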