Implement deallocation events.
Make the event module accept two event types (allocation and deallocation), and pass around the event context. Use bytes-based events to trigger tcache GC on deallocation, and get rid of the tcache ticker.
parent 536ea6858e
commit 97dd79db6c
9 changed files with 306 additions and 171 deletions
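For orientation before the hunks: the change replaces the per-operation tcache ticker with byte counters, one pair for allocation and one for deallocation, each compared against a precomputed "next event" threshold; an event context simply points at whichever pair is being advanced. A minimal, self-contained sketch of that bytes-based trigger pattern (not jemalloc's code; the demo_* names, the printf reporting, and the 64 KiB interval are invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative per-thread state: one counter pair per direction. */
    typedef struct {
        uint64_t alloc_bytes, alloc_next_event;
        uint64_t dalloc_bytes, dalloc_next_event;
    } demo_thread_state_t;

    /* Analogous to event_ctx_t: points at either the alloc or dalloc pair. */
    typedef struct {
        bool is_alloc;
        uint64_t *current;
        uint64_t *next_event;
    } demo_ctx_t;

    #define DEMO_GC_INTERVAL_BYTES ((uint64_t)64 * 1024) /* made-up interval */

    static void
    demo_ctx_get(demo_thread_state_t *ts, demo_ctx_t *ctx, bool is_alloc) {
        ctx->is_alloc = is_alloc;
        ctx->current = is_alloc ? &ts->alloc_bytes : &ts->dalloc_bytes;
        ctx->next_event = is_alloc ? &ts->alloc_next_event :
            &ts->dalloc_next_event;
    }

    /* Bytes-based trigger: fires once per DEMO_GC_INTERVAL_BYTES, no ticker. */
    static void
    demo_advance(demo_thread_state_t *ts, uint64_t usize, bool is_alloc) {
        demo_ctx_t ctx;
        demo_ctx_get(ts, &ctx, is_alloc);
        *ctx.current += usize;
        if (*ctx.current >= *ctx.next_event) {
            printf("%s event at %llu bytes\n", is_alloc ? "alloc" : "dalloc",
                (unsigned long long)*ctx.current);
            *ctx.next_event += DEMO_GC_INTERVAL_BYTES;
        }
    }

    int
    main(void) {
        demo_thread_state_t ts = {0, DEMO_GC_INTERVAL_BYTES,
            0, DEMO_GC_INTERVAL_BYTES};
        for (int i = 0; i < 100; i++) {
            demo_advance(&ts, 4096, true);   /* allocation of 4 KiB */
            demo_advance(&ts, 4096, false);  /* matching deallocation */
        }
        return 0;
    }

With this shape, adding the deallocation direction costs only another counter pair, which is what the event_ctx_t plumbing in the hunks below provides.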
@@ -5,7 +5,6 @@
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/sc.h"
 #include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/util.h"
 
 static inline bool
@@ -27,17 +26,6 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) {
     tsd_slow_update(tsd);
 }
 
-JEMALLOC_ALWAYS_INLINE void
-tcache_event(tsd_t *tsd, tcache_t *tcache) {
-    if (TCACHE_GC_INCR == 0) {
-        return;
-    }
-
-    if (unlikely(ticker_tick(&tcache->gc_ticker))) {
-        tcache_event_hard(tsd, tcache);
-    }
-}
-
 JEMALLOC_ALWAYS_INLINE void *
 tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     size_t size, szind_t binind, bool zero, bool slow_path) {
@@ -171,8 +159,6 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
         bool ret = cache_bin_dalloc_easy(bin, ptr);
         assert(ret);
     }
-
-    tcache_event(tsd, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -195,8 +181,6 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
         bool ret = cache_bin_dalloc_easy(bin, ptr);
         assert(ret);
     }
-
-    tcache_event(tsd, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE tcache_t *
@@ -16,9 +16,6 @@ struct tcache_s {
      * together at the start of this struct.
      */
 
-    /* Drives incremental GC. */
-    ticker_t gc_ticker;
-
     /*
      * The pointer stacks associated with bins follow as a contiguous array.
      * During tcache initialization, the avail pointer in each element of
@@ -4,42 +4,51 @@
 #include "jemalloc/internal/tsd.h"
 
 /*
- * Maximum threshold on thread_allocated_next_event_fast, so that there is no
- * need to check overflow in malloc fast path. (The allocation size in malloc
+ * Maximum threshold on thread_(de)allocated_next_event_fast, so that there is
+ * no need to check overflow in malloc fast path. (The allocation size in malloc
  * fast path never exceeds SC_LOOKUP_MAXCLASS.)
  */
-#define THREAD_ALLOCATED_NEXT_EVENT_FAST_MAX \
+#define THREAD_NEXT_EVENT_FAST_MAX \
     (UINT64_MAX - SC_LOOKUP_MAXCLASS + 1U)
 
 /*
  * The max interval helps make sure that malloc stays on the fast path in the
- * common case, i.e. thread_allocated < thread_allocated_next_event_fast.
- * When thread_allocated is within an event's distance to
- * THREAD_ALLOCATED_NEXT_EVENT_FAST_MAX above, thread_allocated_next_event_fast
- * is wrapped around and we fall back to the medium-fast path. The max interval
- * makes sure that we're not staying on the fallback case for too long, even if
- * there's no active event or if all active events have long wait times.
+ * common case, i.e. thread_allocated < thread_allocated_next_event_fast. When
+ * thread_allocated is within an event's distance to THREAD_NEXT_EVENT_FAST_MAX
+ * above, thread_allocated_next_event_fast is wrapped around and we fall back to
+ * the medium-fast path. The max interval makes sure that we're not staying on
+ * the fallback case for too long, even if there's no active event or if all
+ * active events have long wait times.
  */
 #define THREAD_EVENT_MAX_INTERVAL ((uint64_t)(4U << 20))
 
+typedef struct event_ctx_s {
+    bool is_alloc;
+    uint64_t *current;
+    uint64_t *last_event;
+    uint64_t *next_event;
+    uint64_t *next_event_fast;
+} event_ctx_t;
+
 void thread_event_assert_invariants_debug(tsd_t *tsd);
-void thread_event_trigger(tsd_t *tsd, bool delay_event);
-void thread_event_rollback(tsd_t *tsd, size_t diff);
-void thread_event_update(tsd_t *tsd);
+void thread_event_trigger(tsd_t *tsd, event_ctx_t *ctx, bool delay_event);
+void thread_alloc_event_rollback(tsd_t *tsd, size_t diff);
+void thread_event_update(tsd_t *tsd, bool alloc_event);
 void thread_event_boot();
 void thread_event_recompute_fast_threshold(tsd_t *tsd);
 void tsd_thread_event_init(tsd_t *tsd);
 
 /*
  * List of all events, in the following format:
- * E(event, (condition))
+ * E(event, (condition), is_alloc_event)
  */
 #define ITERATE_OVER_ALL_EVENTS \
-    E(tcache_gc, (TCACHE_GC_INCR_BYTES > 0)) \
-    E(prof_sample, (config_prof && opt_prof)) \
-    E(stats_interval, (opt_stats_interval >= 0))
+    E(tcache_gc, (TCACHE_GC_INCR_BYTES > 0), true) \
+    E(prof_sample, (config_prof && opt_prof), true) \
+    E(stats_interval, (opt_stats_interval >= 0), true) \
+    E(tcache_gc_dalloc, (TCACHE_GC_INCR_BYTES > 0), false)
 
-#define E(event, condition) \
+#define E(event, condition_unused, is_alloc_event_unused) \
     C(event##_event_wait)
 
 /* List of all thread event counters. */
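A side note on the E(...) list above: it is an X-macro, and each consumer redefines E before instantiating ITERATE_OVER_ALL_EVENTS, so the added is_alloc argument can be consumed or ignored per expansion site (the counter declaration via C(event##_event_wait) ignores it). A stripped-down, self-contained illustration of the pattern (the DEMO_* and demo_* names are hypothetical, not jemalloc identifiers):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for ITERATE_OVER_ALL_EVENTS: E(event, condition, is_alloc). */
    #define DEMO_EVENTS \
        E(demo_tcache_gc, 1, true) \
        E(demo_prof_sample, 1, true) \
        E(demo_tcache_gc_dalloc, 1, false)

    /*
     * First expansion: declare one wait counter per event; the condition and
     * is_alloc arguments are deliberately ignored, mirroring the
     * C(event##_event_wait) use in the header.
     */
    #define E(event, condition_unused, is_alloc_unused) \
        static uint64_t event##_event_wait = 0;
    DEMO_EVENTS
    #undef E

    int
    main(void) {
        /*
         * Second expansion: this time the is_alloc flag is consumed, to show
         * why the list carries it per event.
         */
    #define E(event, condition, is_alloc) \
        printf(#event ": alloc_event=%d wait=%llu\n", (int)(is_alloc), \
            (unsigned long long)event##_event_wait);
        DEMO_EVENTS
    #undef E
        return 0;
    }

Running it prints each event with its alloc/dalloc attribution, mirroring how the real list lets tcache_gc and tcache_gc_dalloc share a condition while driving different byte counters.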
@@ -83,9 +92,9 @@ ITERATE_OVER_ALL_COUNTERS
 #undef E
 
 /*
- * Two malloc fastpath getters -- use the unsafe getters since tsd may be
- * non-nominal, in which case the fast_threshold will be set to 0. This allows
- * checking for events and tsd non-nominal in a single branch.
+ * The malloc and free fastpath getters -- use the unsafe getters since tsd may
+ * be non-nominal, in which case the fast_threshold will be set to 0. This
+ * allows checking for events and tsd non-nominal in a single branch.
  *
  * Note that these can only be used on the fastpath.
  */
@@ -97,42 +106,83 @@ thread_allocated_malloc_fastpath(tsd_t *tsd) {
 JEMALLOC_ALWAYS_INLINE uint64_t
 thread_allocated_next_event_malloc_fastpath(tsd_t *tsd) {
     uint64_t v = *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd);
-    assert(v <= THREAD_ALLOCATED_NEXT_EVENT_FAST_MAX);
+    assert(v <= THREAD_NEXT_EVENT_FAST_MAX);
     return v;
 }
 
+JEMALLOC_ALWAYS_INLINE void
+thread_event_free_fastpath_ctx(tsd_t *tsd, uint64_t *deallocated,
+    uint64_t *threshold, bool size_hint) {
+    if (!size_hint) {
+        *deallocated = tsd_thread_deallocated_get(tsd);
+        *threshold = tsd_thread_deallocated_next_event_fast_get(tsd);
+    } else {
+        /* Unsafe getters since this may happen before tsd_init. */
+        *deallocated = *tsd_thread_deallocatedp_get_unsafe(tsd);
+        *threshold =
+            *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd);
+    }
+    assert(*threshold <= THREAD_NEXT_EVENT_FAST_MAX);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+event_ctx_is_alloc(event_ctx_t *ctx) {
+    return ctx->is_alloc;
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+event_ctx_current_bytes_get(event_ctx_t *ctx) {
+    return *ctx->current;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+event_ctx_current_bytes_set(event_ctx_t *ctx, uint64_t v) {
+    *ctx->current = v;
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+event_ctx_last_event_get(event_ctx_t *ctx) {
+    return *ctx->last_event;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+event_ctx_last_event_set(event_ctx_t *ctx, uint64_t v) {
+    *ctx->last_event = v;
+}
+
 /* Below 3 for next_event_fast. */
 JEMALLOC_ALWAYS_INLINE uint64_t
-thread_allocated_next_event_fast_get(tsd_t *tsd) {
-    uint64_t v = tsd_thread_allocated_next_event_fast_get(tsd);
-    assert(v <= THREAD_ALLOCATED_NEXT_EVENT_FAST_MAX);
+event_ctx_next_event_fast_get(event_ctx_t *ctx) {
+    uint64_t v = *ctx->next_event_fast;
+    assert(v <= THREAD_NEXT_EVENT_FAST_MAX);
     return v;
 }
 
 JEMALLOC_ALWAYS_INLINE void
-thread_allocated_next_event_fast_set(tsd_t *tsd, uint64_t v) {
-    assert(v <= THREAD_ALLOCATED_NEXT_EVENT_FAST_MAX);
-    *tsd_thread_allocated_next_event_fastp_get(tsd) = v;
+event_ctx_next_event_fast_set(event_ctx_t *ctx, uint64_t v) {
+    assert(v <= THREAD_NEXT_EVENT_FAST_MAX);
+    *ctx->next_event_fast = v;
 }
 
 JEMALLOC_ALWAYS_INLINE void
-thread_allocated_next_event_fast_set_non_nominal(tsd_t *tsd) {
+thread_next_event_fast_set_non_nominal(tsd_t *tsd) {
     /*
-     * Set the fast threshold to zero when tsd is non-nominal. Use the
+     * Set the fast thresholds to zero when tsd is non-nominal. Use the
      * unsafe getter as this may get called during tsd init and clean up.
      */
     *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd) = 0;
+    *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) = 0;
 }
 
 /* For next_event. Setter also updates the fast threshold. */
 JEMALLOC_ALWAYS_INLINE uint64_t
-thread_allocated_next_event_get(tsd_t *tsd) {
-    return tsd_thread_allocated_next_event_get(tsd);
+event_ctx_next_event_get(event_ctx_t *ctx) {
+    return *ctx->next_event;
 }
 
 JEMALLOC_ALWAYS_INLINE void
-thread_allocated_next_event_set(tsd_t *tsd, uint64_t v) {
-    *tsd_thread_allocated_next_eventp_get(tsd) = v;
+event_ctx_next_event_set(tsd_t *tsd, event_ctx_t *ctx, uint64_t v) {
+    *ctx->next_event = v;
     thread_event_recompute_fast_threshold(tsd);
 }
@@ -145,8 +195,8 @@ thread_allocated_next_event_set(tsd_t *tsd, uint64_t v) {
  * at the end will restore the invariants),
  * (b) thread_##event##_event_update() (the thread_event_update() call at the
  * end will restore the invariants), or
- * (c) thread_event_rollback() if the rollback falls below the last_event (the
- * thread_event_update() call at the end will restore the invariants).
+ * (c) thread_alloc_event_rollback() if the rollback falls below the last_event
+ * (the thread_event_update() call at the end will restore the invariants).
  */
 JEMALLOC_ALWAYS_INLINE void
 thread_event_assert_invariants(tsd_t *tsd) {
@@ -156,22 +206,52 @@ thread_event_assert_invariants(tsd_t *tsd) {
 }
 
 JEMALLOC_ALWAYS_INLINE void
-thread_event(tsd_t *tsd, size_t usize) {
-    thread_event_assert_invariants(tsd);
-
-    uint64_t thread_allocated_before = thread_allocated_get(tsd);
-    thread_allocated_set(tsd, thread_allocated_before + usize);
-
-    /* The subtraction is intentionally susceptible to underflow. */
-    if (likely(usize < thread_allocated_next_event_get(tsd) -
-        thread_allocated_before)) {
-        thread_event_assert_invariants(tsd);
+event_ctx_get(tsd_t *tsd, event_ctx_t *ctx, bool is_alloc) {
+    ctx->is_alloc = is_alloc;
+    if (is_alloc) {
+        ctx->current = tsd_thread_allocatedp_get(tsd);
+        ctx->last_event = tsd_thread_allocated_last_eventp_get(tsd);
+        ctx->next_event = tsd_thread_allocated_next_eventp_get(tsd);
+        ctx->next_event_fast =
+            tsd_thread_allocated_next_event_fastp_get(tsd);
     } else {
-        thread_event_trigger(tsd, false);
+        ctx->current = tsd_thread_deallocatedp_get(tsd);
+        ctx->last_event = tsd_thread_deallocated_last_eventp_get(tsd);
+        ctx->next_event = tsd_thread_deallocated_next_eventp_get(tsd);
+        ctx->next_event_fast =
+            tsd_thread_deallocated_next_event_fastp_get(tsd);
     }
 }
 
-#define E(event, condition) \
+JEMALLOC_ALWAYS_INLINE void
+thread_event_advance(tsd_t *tsd, size_t usize, bool is_alloc) {
+    thread_event_assert_invariants(tsd);
+
+    event_ctx_t ctx;
+    event_ctx_get(tsd, &ctx, is_alloc);
+
+    uint64_t bytes_before = event_ctx_current_bytes_get(&ctx);
+    event_ctx_current_bytes_set(&ctx, bytes_before + usize);
+
+    /* The subtraction is intentionally susceptible to underflow. */
+    if (likely(usize < event_ctx_next_event_get(&ctx) - bytes_before)) {
+        thread_event_assert_invariants(tsd);
+    } else {
+        thread_event_trigger(tsd, &ctx, false);
+    }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+thread_dalloc_event(tsd_t *tsd, size_t usize) {
+    thread_event_advance(tsd, usize, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+thread_alloc_event(tsd_t *tsd, size_t usize) {
+    thread_event_advance(tsd, usize, true);
+}
+
+#define E(event, condition, is_alloc) \
 JEMALLOC_ALWAYS_INLINE void \
 thread_##event##_event_update(tsd_t *tsd, uint64_t event_wait) { \
     thread_event_assert_invariants(tsd); \
@@ -188,7 +268,7 @@ thread_##event##_event_update(tsd_t *tsd, uint64_t event_wait) { \
         event_wait = THREAD_EVENT_MAX_START_WAIT; \
     } \
     event##_event_wait_set(tsd, event_wait); \
-    thread_event_update(tsd); \
+    thread_event_update(tsd, is_alloc); \
 }
 
 ITERATE_OVER_ALL_EVENTS
@@ -81,10 +81,14 @@ typedef void (*test_callback_t)(int *);
     O(thread_allocated, uint64_t, uint64_t) \
     O(thread_allocated_next_event_fast, uint64_t, uint64_t) \
     O(thread_deallocated, uint64_t, uint64_t) \
+    O(thread_deallocated_next_event_fast, uint64_t, uint64_t) \
     O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
     O(thread_allocated_last_event, uint64_t, uint64_t) \
     O(thread_allocated_next_event, uint64_t, uint64_t) \
+    O(thread_deallocated_last_event, uint64_t, uint64_t) \
+    O(thread_deallocated_next_event, uint64_t, uint64_t) \
     O(tcache_gc_event_wait, uint64_t, uint64_t) \
+    O(tcache_gc_dalloc_event_wait, uint64_t, uint64_t) \
     O(prof_sample_event_wait, uint64_t, uint64_t) \
     O(prof_sample_last_event, uint64_t, uint64_t) \
     O(stats_interval_event_wait, uint64_t, uint64_t) \
@@ -114,10 +118,14 @@ typedef void (*test_callback_t)(int *);
     /* thread_allocated */ 0, \
     /* thread_allocated_next_event_fast */ 0, \
    /* thread_deallocated */ 0, \
+    /* thread_deallocated_next_event_fast */ 0, \
     /* rtree_ctx */ RTREE_CTX_ZERO_INITIALIZER, \
     /* thread_allocated_last_event */ 0, \
     /* thread_allocated_next_event */ THREAD_EVENT_MIN_START_WAIT, \
+    /* thread_deallocated_last_event */ 0, \
+    /* thread_deallocated_next_event */ THREAD_EVENT_MIN_START_WAIT, \
     /* tcache_gc_event_wait */ THREAD_EVENT_MIN_START_WAIT, \
+    /* tcache_gc_dalloc_event_wait */ THREAD_EVENT_MIN_START_WAIT, \
     /* prof_sample_event_wait */ THREAD_EVENT_MIN_START_WAIT, \
     /* prof_sample_last_event */ 0, \
     /* stats_interval_event_wait */ THREAD_EVENT_MIN_START_WAIT, \