Remove generic experimental hooks

This commit is contained in:
Slobodan Predolac 2026-05-12 15:34:09 -07:00
parent 3f9d8ca3d0
commit ff2c2548a3
20 changed files with 25 additions and 1636 deletions

View file

@ -124,7 +124,6 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/fxp.c \
$(srcroot)src/san.c \
$(srcroot)src/san_bump.c \
$(srcroot)src/hook.c \
$(srcroot)src/hpa.c \
$(srcroot)src/hpa_central.c \
$(srcroot)src/hpa_hooks.c \
@ -239,7 +238,6 @@ TESTS_UNIT := \
${srcroot}test/unit/san.c \
${srcroot}test/unit/san_bump.c \
$(srcroot)test/unit/hash.c \
$(srcroot)test/unit/hook.c \
$(srcroot)test/unit/hpa.c \
$(srcroot)test/unit/hpa_sec_integration.c \
$(srcroot)test/unit/hpa_thp_always.c \
@ -296,7 +294,6 @@ TESTS_UNIT := \
$(srcroot)test/unit/safety_check.c \
$(srcroot)test/unit/sc.c \
$(srcroot)test/unit/sec.c \
$(srcroot)test/unit/seq.c \
$(srcroot)test/unit/SFMT.c \
$(srcroot)test/unit/size_check.c \
$(srcroot)test/unit/size_classes.c \
@ -358,7 +355,6 @@ TESTS_ANALYZE := $(srcroot)test/analyze/prof_bias.c \
$(srcroot)test/analyze/sizes.c
TESTS_STRESS := $(srcroot)test/stress/batch_alloc.c \
$(srcroot)test/stress/fill_flush.c \
$(srcroot)test/stress/hookbench.c \
$(srcroot)test/stress/large_microbench.c \
$(srcroot)test/stress/mallctl.c \
$(srcroot)test/stress/microbench.c

View file

@ -7,7 +7,6 @@
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/stats.h"
@ -86,8 +85,7 @@ void arena_ptr_array_flush(tsd_t *tsd, szind_t binind,
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero, size_t *newsize);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(const arena_t *arena);
ehooks_t *arena_get_ehooks(const arena_t *arena);
extent_hooks_t *arena_set_extent_hooks(

View file

@ -1,163 +0,0 @@
#ifndef JEMALLOC_INTERNAL_HOOK_H
#define JEMALLOC_INTERNAL_HOOK_H
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/tsd.h"
/*
* This API is *extremely* experimental, and may get ripped out, changed in API-
* and ABI-incompatible ways, be insufficiently or incorrectly documented, etc.
*
* It allows hooking the stateful parts of the API to see changes as they
* happen.
*
* Allocation hooks are called after the allocation is done, free hooks are
* called before the free is done, and expand hooks are called after the
* allocation is expanded.
*
* For realloc and rallocx, if the expansion happens in place, the expansion
* hook is called. If it is moved, then the alloc hook is called on the new
* location, and then the free hook is called on the old location (i.e. both
* hooks are invoked in between the alloc and the dalloc).
*
* If we return NULL from OOM, then usize might not be trustworthy. Calling
* realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0)
* only calls the free hook. (Calling realloc(NULL, 0) is treated as malloc(0),
* and only calls the alloc hook).
*
* Reentrancy:
* Reentrancy is guarded against from within the hook implementation. If you
* call allocator functions from within a hook, the hooks will not be invoked
* again.
* Threading:
* The installation of a hook synchronizes with all its uses. If you can
* prove the installation of a hook happens-before a jemalloc entry point,
* then the hook will get invoked (unless there's a racing removal).
*
* Hook insertion appears to be atomic at a per-thread level (i.e. if a thread
* allocates and has the alloc hook invoked, then a subsequent free on the
* same thread will also have the free hook invoked).
*
* The *removal* of a hook does *not* block until all threads are done with
* the hook. Hook authors have to be resilient to this, and need some
* out-of-band mechanism for cleaning up any dynamically allocated memory
* associated with their hook.
* Ordering:
* Order of hook execution is unspecified, and may be different than insertion
* order.
*/
#define HOOK_MAX 4
enum hook_alloc_e {
hook_alloc_malloc,
hook_alloc_posix_memalign,
hook_alloc_aligned_alloc,
hook_alloc_calloc,
hook_alloc_memalign,
hook_alloc_valloc,
hook_alloc_pvalloc,
hook_alloc_mallocx,
/* The reallocating functions have both alloc and dalloc variants */
hook_alloc_realloc,
hook_alloc_rallocx,
};
/*
* We put the enum typedef after the enum, since this file may get included by
* jemalloc_cpp.cpp, and C++ disallows enum forward declarations.
*/
typedef enum hook_alloc_e hook_alloc_t;
enum hook_dalloc_e {
hook_dalloc_free,
hook_dalloc_dallocx,
hook_dalloc_sdallocx,
/*
* The dalloc halves of reallocation (not called if in-place expansion
* happens).
*/
hook_dalloc_realloc,
hook_dalloc_rallocx,
};
typedef enum hook_dalloc_e hook_dalloc_t;
enum hook_expand_e {
hook_expand_realloc,
hook_expand_rallocx,
hook_expand_xallocx,
};
typedef enum hook_expand_e hook_expand_t;
typedef void (*hook_alloc)(void *extra, hook_alloc_t type, void *result,
uintptr_t result_raw, uintptr_t args_raw[3]);
typedef void (*hook_dalloc)(
void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
typedef void (*hook_expand)(void *extra, hook_expand_t type, void *address,
size_t old_usize, size_t new_usize, uintptr_t result_raw,
uintptr_t args_raw[4]);
typedef struct hooks_s hooks_t;
struct hooks_s {
hook_alloc alloc_hook;
hook_dalloc dalloc_hook;
hook_expand expand_hook;
void *extra;
};
/*
* Begin implementation details; everything above this point might one day live
* in a public API. Everything below this point never will.
*/
/*
* The realloc pathways haven't gotten any refactoring love in a while, and it's
* fairly difficult to pass information from the entry point to the hooks. We
 * put the information the hooks will need into a struct to encapsulate
* everything.
*
* Much of these pathways are force-inlined, so that the compiler can avoid
* materializing this struct until we hit an extern arena function. For fairly
* goofy reasons, *many* of the realloc paths hit an extern arena function.
* These paths are cold enough that it doesn't matter; eventually, we should
* rewrite the realloc code to make the expand-in-place and the
* free-then-realloc paths more orthogonal, at which point we don't need to
* spread the hook logic all over the place.
*/
typedef struct hook_ralloc_args_s hook_ralloc_args_t;
struct hook_ralloc_args_s {
/* I.e. as opposed to rallocx. */
bool is_realloc;
/*
* The expand hook takes 4 arguments, even if only 3 are actually used;
* we add an extra one in case the user decides to memcpy without
* looking too closely at the hooked function.
*/
uintptr_t args[4];
};
/*
* Returns an opaque handle to be used when removing the hook. NULL means that
* we couldn't install the hook.
*/
bool hook_boot(void);
void *hook_install(tsdn_t *tsdn, hooks_t *to_install);
/* Uninstalls the hook with the handle previously returned from hook_install. */
void hook_remove(tsdn_t *tsdn, void *opaque);
/* Hooks */
void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
uintptr_t args_raw[3]);
void hook_invoke_dalloc(
hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
#endif /* JEMALLOC_INTERNAL_HOOK_H */

View file

@ -5,7 +5,6 @@
#include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/arena_inlines_b.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_init.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
@ -175,8 +174,7 @@ isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, bool slab, tcache_t *tcache, arena_t *arena,
hook_ralloc_args_t *hook_args) {
size_t alignment, bool zero, bool slab, tcache_t *tcache, arena_t *arena) {
witness_assert_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0);
void *p;
@ -197,27 +195,13 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
hook_invoke_alloc(
hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx, p,
(uintptr_t)p, hook_args->args);
hook_invoke_dalloc(
hook_args->is_realloc ? hook_dalloc_realloc : hook_dalloc_rallocx,
ptr, hook_args->args);
isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
return p;
}
/*
* is_realloc threads through the knowledge of whether or not this call comes
* from je_realloc (as opposed to je_rallocx); this ensures that we pass the
* correct entry point into any hooks.
* Note that these functions are all force-inlined, so no actual bool gets
* passed-around anywhere.
*/
JEMALLOC_ALWAYS_INLINE void *
iralloct_explicit_slab(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, bool slab, tcache_t *tcache, arena_t *arena,
hook_ralloc_args_t *hook_args) {
size_t alignment, bool zero, bool slab, tcache_t *tcache, arena_t *arena) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(
@ -230,27 +214,26 @@ iralloct_explicit_slab(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
* and copy.
*/
return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
zero, slab, tcache, arena, hook_args);
zero, slab, tcache, arena);
}
return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
slab, tcache, hook_args);
slab, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
hook_ralloc_args_t *hook_args) {
size_t usize, bool zero, tcache_t *tcache, arena_t *arena) {
bool slab = sz_can_use_slab(usize);
return iralloct_explicit_slab(tsdn, ptr, oldsize, size, alignment, zero,
slab, tcache, arena, hook_args);
slab, tcache, arena);
}
JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
size_t usize, bool zero, hook_ralloc_args_t *hook_args) {
size_t usize, bool zero) {
return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, usize,
zero, tcache_get(tsd), NULL, hook_args);
zero, tcache_get(tsd), NULL);
}
JEMALLOC_ALWAYS_INLINE bool

View file

@ -3,7 +3,6 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/hook.h"
void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(
@ -11,8 +10,7 @@ void *large_palloc(
bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
size_t alignment, bool zero, tcache_t *tcache);
void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);

View file

@ -1,58 +0,0 @@
#ifndef JEMALLOC_INTERNAL_SEQ_H
#define JEMALLOC_INTERNAL_SEQ_H
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/atomic.h"
/*
* A simple seqlock implementation.
*/
/* clang-format off */
#define seq_define(type, short_type) \
typedef struct { \
atomic_zu_t seq; \
atomic_zu_t data[ \
(sizeof(type) + sizeof(size_t) - 1) / sizeof(size_t)]; \
} seq_##short_type##_t; \
\
/* \
* No internal synchronization -- the caller must ensure that there's \
* only a single writer at a time. \
*/ \
static inline void \
seq_store_##short_type(seq_##short_type##_t *dst, type *src) { \
size_t buf[sizeof(dst->data) / sizeof(size_t)]; \
buf[sizeof(buf) / sizeof(size_t) - 1] = 0; \
memcpy(buf, src, sizeof(type)); \
size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED); \
atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED); \
atomic_fence(ATOMIC_RELEASE); \
for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED); \
} \
atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE); \
} \
\
/* Returns whether or not the read was consistent. */ \
static inline bool \
seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) { \
size_t buf[sizeof(src->data) / sizeof(size_t)]; \
size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE); \
if (seq1 % 2 != 0) { \
return false; \
} \
for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED); \
} \
atomic_fence(ATOMIC_ACQUIRE); \
size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED); \
if (seq1 != seq2) { \
return false; \
} \
memcpy(dst, buf, sizeof(type)); \
return true; \
}
/* clang-format on */
#endif /* JEMALLOC_INTERNAL_SEQ_H */

View file

@ -81,7 +81,6 @@ typedef ql_elm(tsd_t) tsd_link_t;
O(sec_shard, uint8_t, uint8_t) \
O(binshards, tsd_binshards_t, tsd_binshards_t) \
O(tsd_link, tsd_link_t, tsd_link_t) \
O(in_hook, bool, bool) \
O(peak, peak_t, peak_t) \
O(tcache_slow, tcache_slow_t, tcache_slow_t) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t)
@ -101,8 +100,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE), \
/* sec_shard */ (uint8_t) - 1, \
/* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \
/* tsd_link */ {NULL}, /* in_hook */ false, \
/* peak */ PEAK_INITIALIZER, \
/* tsd_link */ {NULL}, /* peak */ PEAK_INITIALIZER, \
/* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \
/* rtree_ctx */ RTREE_CTX_INITIALIZER,
@ -152,14 +150,6 @@ void tsd_prefork(tsd_t *tsd);
void tsd_postfork_parent(tsd_t *tsd);
void tsd_postfork_child(tsd_t *tsd);
/*
* Call ..._inc when your module wants to take all threads down the slow paths,
* and ..._dec when it no longer needs to.
*/
void tsd_global_slow_inc(tsdn_t *tsdn);
void tsd_global_slow_dec(tsdn_t *tsdn);
bool tsd_global_slow(void);
#define TSD_MIN_INIT_STATE_MAX_FETCHED (128)
enum {

View file

@ -1694,8 +1694,7 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache) {
size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment);
if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
return NULL;
@ -1707,18 +1706,13 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
UNUSED size_t newsize;
if (!arena_ralloc_no_move(
tsdn, ptr, oldsize, usize, 0, zero, &newsize)) {
hook_invoke_expand(hook_args->is_realloc
? hook_expand_realloc
: hook_expand_rallocx,
ptr, oldsize, usize, (uintptr_t)ptr,
hook_args->args);
return ptr;
}
}
if (oldsize >= SC_LARGE_MINCLASS && usize >= SC_LARGE_MINCLASS) {
return large_ralloc(tsdn, arena, ptr, usize, alignment, zero,
tcache, hook_args);
tcache);
}
/*
@ -1731,13 +1725,6 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
return NULL;
}
hook_invoke_alloc(
hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx,
ret, (uintptr_t)ret, hook_args->args);
hook_invoke_dalloc(
hook_args->is_realloc ? hook_dalloc_realloc : hook_dalloc_rallocx,
ptr, hook_args->args);
/*
* Junk/zero-filling were already done by
* ipalloc()/arena_malloc().

View file

@ -361,8 +361,6 @@ CTL_PROTO(stats_retained)
CTL_PROTO(stats_pinned)
CTL_PROTO(stats_zero_reallocs)
CTL_PROTO(approximate_stats_active)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
CTL_PROTO(experimental_hooks_prof_backtrace)
CTL_PROTO(experimental_hooks_prof_dump)
CTL_PROTO(experimental_hooks_prof_sample)
@ -887,8 +885,6 @@ static const ctl_named_node_t stats_node[] = {
};
static const ctl_named_node_t experimental_hooks_node[] = {
{NAME("install"), CTL(experimental_hooks_install)},
{NAME("remove"), CTL(experimental_hooks_remove)},
{NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)},
{NAME("prof_dump"), CTL(experimental_hooks_prof_dump)},
{NAME("prof_sample"), CTL(experimental_hooks_prof_sample)},
@ -4223,51 +4219,6 @@ label_return:
return ret;
}
static int
experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (oldp == NULL || oldlenp == NULL || newp == NULL) {
ret = EINVAL;
goto label_return;
}
/*
* Note: this is a *private* struct. This is an experimental interface;
* forcing the user to know the jemalloc internals well enough to
* extract the ABI hopefully ensures nobody gets too comfortable with
* this API, which can change at a moment's notice.
*/
hooks_t hooks;
WRITE(hooks, hooks_t);
void *handle = hook_install(tsd_tsdn(tsd), &hooks);
if (handle == NULL) {
ret = EAGAIN;
goto label_return;
}
READ(handle, void *);
ret = 0;
label_return:
return ret;
}
static int
experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
WRITEONLY();
void *handle = NULL;
WRITE(handle, void *);
if (handle == NULL) {
ret = EINVAL;
goto label_return;
}
hook_remove(tsd_tsdn(tsd), handle);
ret = 0;
label_return:
return ret;
}
/*
* Output six memory utilization entries for an input pointer, the first one of
* type (void *) and the remaining five of type size_t, describing the following

View file

@ -1,192 +0,0 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/seq.h"
typedef struct hooks_internal_s hooks_internal_t;
struct hooks_internal_s {
hooks_t hooks;
bool in_use;
};
seq_define(hooks_internal_t, hooks)
static atomic_u_t nhooks = ATOMIC_INIT(0);
static seq_hooks_t hooks[HOOK_MAX];
static malloc_mutex_t hooks_mu;
bool
hook_boot(void) {
return malloc_mutex_init(
&hooks_mu, "hooks", WITNESS_RANK_HOOK, malloc_mutex_rank_exclusive);
}
static void *
hook_install_locked(hooks_t *to_install) {
hooks_internal_t hooks_internal;
for (int i = 0; i < HOOK_MAX; i++) {
bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
/* We hold mu; no concurrent access. */
assert(success);
if (!hooks_internal.in_use) {
hooks_internal.hooks = *to_install;
hooks_internal.in_use = true;
seq_store_hooks(&hooks[i], &hooks_internal);
atomic_store_u(&nhooks,
atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
ATOMIC_RELAXED);
return &hooks[i];
}
}
return NULL;
}
void *
hook_install(tsdn_t *tsdn, hooks_t *to_install) {
malloc_mutex_lock(tsdn, &hooks_mu);
void *ret = hook_install_locked(to_install);
if (ret != NULL) {
tsd_global_slow_inc(tsdn);
}
malloc_mutex_unlock(tsdn, &hooks_mu);
return ret;
}
static void
hook_remove_locked(seq_hooks_t *to_remove) {
hooks_internal_t hooks_internal;
bool success = seq_try_load_hooks(&hooks_internal, to_remove);
/* We hold mu; no concurrent access. */
assert(success);
/* Should only remove hooks that were added. */
assert(hooks_internal.in_use);
hooks_internal.in_use = false;
seq_store_hooks(to_remove, &hooks_internal);
atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
ATOMIC_RELAXED);
}
void
hook_remove(tsdn_t *tsdn, void *opaque) {
if (config_debug) {
char *hooks_begin = (char *)&hooks[0];
char *hooks_end = (char *)&hooks[HOOK_MAX];
char *hook = (char *)opaque;
assert(hooks_begin <= hook && hook < hooks_end
&& (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
}
malloc_mutex_lock(tsdn, &hooks_mu);
hook_remove_locked((seq_hooks_t *)opaque);
tsd_global_slow_dec(tsdn);
malloc_mutex_unlock(tsdn, &hooks_mu);
}
#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \
for (int for_each_hook_counter = 0; for_each_hook_counter < HOOK_MAX; \
for_each_hook_counter++) { \
bool for_each_hook_success = seq_try_load_hooks( \
(hooks_internal_ptr), &hooks[for_each_hook_counter]); \
if (!for_each_hook_success) { \
continue; \
} \
if (!(hooks_internal_ptr)->in_use) { \
continue; \
}
#define FOR_EACH_HOOK_END }
static bool *
hook_reentrantp(void) {
/*
* We prevent user reentrancy within hooks. This is basically just a
* thread-local bool that triggers an early-exit.
*
* We don't fold in_hook into reentrancy. There are two reasons for
* this:
* - Right now, we turn on reentrancy during things like extent hook
* execution. Allocating during extent hooks is not officially
* supported, but we don't want to break it for the time being. These
* sorts of allocations should probably still be hooked, though.
* - If a hook allocates, we may want it to be relatively fast (after
* all, it executes on every allocator operation). Turning on
* reentrancy is a fairly heavyweight mode (disabling tcache,
* redirecting to arena 0, etc.). It's possible we may one day want
* to turn on reentrant mode here, if it proves too difficult to keep
* this working. But that's fairly easy for us to see; OTOH, people
* not using hooks because they're too slow is easy for us to miss.
*
* The tricky part is
* that this code might get invoked even if we don't have access to tsd.
* This function mimics getting a pointer to thread-local data, except
* that it might secretly return a pointer to some global data if we
* know that the caller will take the early-exit path.
* If we return a bool that indicates that we are reentrant, then the
* caller will go down the early exit path, leaving the global
* untouched.
*/
static bool in_hook_global = true;
tsdn_t *tsdn = tsdn_fetch();
bool *in_hook = tsdn_in_hookp_get(tsdn);
if (in_hook != NULL) {
return in_hook;
}
return &in_hook_global;
}
#define HOOK_PROLOGUE \
if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
return; \
} \
bool *in_hook = hook_reentrantp(); \
if (*in_hook) { \
return; \
} \
*in_hook = true;
#define HOOK_EPILOGUE *in_hook = false;
void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
uintptr_t args_raw[3]) {
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_alloc h = hook.hooks.alloc_hook;
if (h != NULL) {
h(hook.hooks.extra, type, result, result_raw, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}
void
hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_dalloc h = hook.hooks.dalloc_hook;
if (h != NULL) {
h(hook.hooks.extra, type, address, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}
void
hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_expand h = hook.hooks.expand_hook;
if (h != NULL) {
h(hook.hooks.extra, type, address, old_usize, new_usize,
result_raw, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}

View file

@ -11,7 +11,6 @@
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/fxp.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_init.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
@ -776,14 +775,6 @@ malloc_default(size_t size) {
dopts.item_size = size;
imalloc(&sopts, &dopts);
/*
* Note that this branch gets optimized away -- it immediately follows
* the check on tsd_fast that sets sopts.slow.
*/
if (sopts.slow) {
uintptr_t args[3] = {size};
hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args);
}
return ret;
}
@ -832,12 +823,6 @@ JEMALLOC_ATTR(nonnull(1))
dopts.alignment = alignment;
ret = imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {
(uintptr_t)memptr, (uintptr_t)alignment, (uintptr_t)size};
hook_invoke_alloc(
hook_alloc_posix_memalign, *memptr, (uintptr_t)ret, args);
}
LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
*memptr);
@ -875,11 +860,6 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
dopts.alignment = alignment;
imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size};
hook_invoke_alloc(
hook_alloc_aligned_alloc, ret, (uintptr_t)ret, args);
}
LOG("core.aligned_alloc.exit", "result: %p", ret);
@ -910,10 +890,6 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
dopts.zero = true;
imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size};
hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args);
}
LOG("core.calloc.exit", "result: %p", ret);
@ -1056,8 +1032,6 @@ free_default(void *ptr) {
tcache_t *tcache = tcache_get_from_ind(tsd,
TCACHE_IND_AUTOMATIC, /* slow */ true,
/* is_alloc */ false);
uintptr_t args_raw[3] = {(uintptr_t)ptr};
hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
ifree(tsd, ptr, tcache, /* slow */ true);
}
@ -1125,11 +1099,6 @@ JEMALLOC_ATTR(malloc) je_memalign(size_t alignment, size_t size) {
dopts.alignment = alignment;
imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {alignment, size};
hook_invoke_alloc(
hook_alloc_memalign, ret, (uintptr_t)ret, args);
}
LOG("core.memalign.exit", "result: %p", ret);
return ret;
@ -1163,10 +1132,6 @@ JEMALLOC_ATTR(malloc) je_valloc(size_t size) {
dopts.alignment = PAGE;
imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {size};
hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args);
}
LOG("core.valloc.exit", "result: %p\n", ret);
return ret;
@ -1204,11 +1169,6 @@ JEMALLOC_ATTR(malloc) je_pvalloc(size_t size) {
dopts.alignment = PAGE;
imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {size};
hook_invoke_alloc(
hook_alloc_pvalloc, ret, (uintptr_t)ret, args);
}
LOG("core.pvalloc.exit", "result: %p\n", ret);
return ret;
@ -1397,11 +1357,6 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
}
imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {size, flags};
hook_invoke_alloc(
hook_alloc_mallocx, ret, (uintptr_t)ret, args);
}
LOG("core.mallocx.exit", "result: %p", ret);
return ret;
@ -1410,7 +1365,7 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
static void *
irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
prof_tctx_t *tctx) {
void *p;
if (tctx == NULL) {
@ -1427,15 +1382,14 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
size_t bumped_usize = sz_sa2u(usize, alignment);
p = iralloct_explicit_slab(tsdn, old_ptr, old_usize,
bumped_usize, alignment, zero, /* slab */ false, tcache,
arena, hook_args);
arena);
if (p == NULL) {
return NULL;
}
arena_prof_promote(tsdn, p, usize, bumped_usize);
} else {
p = iralloct_explicit_slab(tsdn, old_ptr, old_usize, usize,
alignment, zero, /* slab */ false, tcache, arena,
hook_args);
alignment, zero, /* slab */ false, tcache, arena);
}
assert(prof_sample_aligned(p));
@ -1445,7 +1399,7 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
JEMALLOC_ALWAYS_INLINE void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
size_t alignment, size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
emap_alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
emap_alloc_ctx_t *alloc_ctx) {
prof_info_t old_prof_info;
prof_info_get_and_reset_recent(tsd, old_ptr, alloc_ctx, &old_prof_info);
bool prof_active = prof_active_get_unlocked();
@ -1454,10 +1408,10 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
void *p;
if (unlikely(tctx != PROF_TCTX_SENTINEL)) {
p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
usize, alignment, zero, tcache, arena, tctx, hook_args);
usize, alignment, zero, tcache, arena, tctx);
} else {
p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
usize, zero, tcache, arena, hook_args);
usize, zero, tcache, arena);
}
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx);
@ -1506,17 +1460,15 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
goto label_oom;
}
hook_ralloc_args_t hook_args = {
is_realloc, {(uintptr_t)ptr, size, flags, 0}};
if (config_prof && opt_prof) {
p = irallocx_prof(tsd, ptr, old_usize, size, alignment, usize,
zero, tcache, arena, &alloc_ctx, &hook_args);
zero, tcache, arena, &alloc_ctx);
if (unlikely(p == NULL)) {
goto label_oom;
}
} else {
p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
usize, zero, tcache, arena, &hook_args);
usize, zero, tcache, arena);
if (unlikely(p == NULL)) {
goto label_oom;
}
@ -1582,8 +1534,6 @@ do_realloc_nonnull_zero(void *ptr) {
tcache_t *tcache = tcache_get_from_ind(tsd,
TCACHE_IND_AUTOMATIC, /* slow */ true,
/* is_alloc */ false);
uintptr_t args[3] = {(uintptr_t)ptr, 0};
hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
ifree(tsd, ptr, tcache, true);
check_entry_exit_locking(tsd_tsdn(tsd));
@ -1634,11 +1584,6 @@ JEMALLOC_ALLOC_SIZE(2) je_realloc(void *ptr, size_t size) {
dopts.item_size = size;
imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {(uintptr_t)ptr, size};
hook_invoke_alloc(
hook_alloc_realloc, ret, (uintptr_t)ret, args);
}
LOG("core.realloc.exit", "result: %p", ret);
return ret;
}
@ -1818,12 +1763,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
junk_alloc_callback(excess_start, excess_len);
}
label_not_resized:
if (unlikely(!tsd_fast(tsd))) {
uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags};
hook_invoke_expand(hook_expand_xallocx, ptr, old_usize, usize,
(uintptr_t)usize, args);
}
UTRACE(ptr, size, ptr);
check_entry_exit_locking(tsd_tsdn(tsd));
@ -1877,8 +1816,6 @@ je_dallocx(void *ptr, int flags) {
tsd_assert_fast(tsd);
ifree(tsd, ptr, tcache, false);
} else {
uintptr_t args_raw[3] = {(uintptr_t)ptr, flags};
hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw);
ifree(tsd, ptr, tcache, true);
}
check_entry_exit_locking(tsd_tsdn(tsd));
@ -1916,8 +1853,6 @@ sdallocx_default(void *ptr, size_t size, int flags) {
tsd_assert_fast(tsd);
isfree(tsd, ptr, usize, tcache, false);
} else {
uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags};
hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw);
isfree(tsd, ptr, usize, tcache, true);
}
check_entry_exit_locking(tsd_tsdn(tsd));

View file

@ -7,7 +7,6 @@
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_fork.h"
#include "jemalloc/internal/jemalloc_init.h"
#include "jemalloc/internal/malloc_io.h"
@ -234,7 +233,6 @@ malloc_init_hard_a0_locked(void) {
if (arenas_management_boot()) {
return true;
}
hook_boot();
experimental_thread_events_boot();
/*
* Create enough scaffolding to allow recursive allocation in

View file

@ -184,8 +184,7 @@ large_ralloc_move_helper(
void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
size_t alignment, bool zero, tcache_t *tcache) {
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
size_t oldusize = edata_usize_get(edata);
@ -196,9 +195,6 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
/* Try to avoid moving the allocation. */
if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) {
hook_invoke_expand(hook_args->is_realloc ? hook_expand_realloc
: hook_expand_rallocx,
ptr, oldusize, usize, (uintptr_t)ptr, hook_args->args);
return edata_addr_get(edata);
}
@ -213,13 +209,6 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
return NULL;
}
hook_invoke_alloc(
hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx,
ret, (uintptr_t)ret, hook_args->args);
hook_invoke_dalloc(
hook_args->is_realloc ? hook_dalloc_realloc : hook_dalloc_rallocx,
ptr, hook_args->args);
size_t copysize = (usize < oldusize) ? usize : oldusize;
memcpy(ret, edata_addr_get(edata), copysize);
isdalloct(tsdn, edata_addr_get(edata), oldusize, tcache, NULL, true);

View file

@ -63,9 +63,6 @@ typedef ql_head(tsd_t) tsd_list_t;
static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds);
static malloc_mutex_t tsd_nominal_tsds_lock;
/* How many slow-path-enabling features are turned on. */
static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0);
static bool
tsd_in_nominal_list(tsd_t *tsd) {
tsd_t *tsd_list;
@ -104,59 +101,12 @@ tsd_remove_nominal(tsd_t *tsd) {
malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}
static void
tsd_force_recompute(tsdn_t *tsdn) {
/*
* The stores to tsd->state here need to synchronize with the exchange
* in tsd_slow_update.
*/
atomic_fence(ATOMIC_RELEASE);
malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock);
tsd_t *remote_tsd;
ql_foreach (remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED)
<= tsd_state_nominal_max);
tsd_atomic_store(&remote_tsd->state,
tsd_state_nominal_recompute, ATOMIC_RELAXED);
/* See comments in te_recompute_fast_threshold(). */
atomic_fence(ATOMIC_SEQ_CST);
te_next_event_fast_set_non_nominal(remote_tsd);
}
malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock);
}
void
tsd_global_slow_inc(tsdn_t *tsdn) {
atomic_fetch_add_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
/*
* We unconditionally force a recompute, even if the global slow count
* was already positive. If we didn't, then it would be possible for us
* to return to the user, have the user synchronize externally with some
* other thread, and then have that other thread not have picked up the
* update yet (since the original incrementing thread might still be
* making its way through the tsd list).
*/
tsd_force_recompute(tsdn);
}
void
tsd_global_slow_dec(tsdn_t *tsdn) {
atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
/* See the note in ..._inc(). */
tsd_force_recompute(tsdn);
}
static bool
tsd_local_slow(tsd_t *tsd) {
return !tsd_tcache_enabled_get(tsd)
|| tsd_reentrancy_level_get(tsd) > 0;
}
bool
tsd_global_slow(void) {
return atomic_load_u32(&tsd_global_slow_count, ATOMIC_RELAXED) > 0;
}
/******************************************************************************/
static uint8_t
@ -165,7 +115,7 @@ tsd_state_compute(tsd_t *tsd) {
return tsd_state_get(tsd);
}
/* We're in *a* nominal state; but which one? */
if (malloc_slow || tsd_local_slow(tsd) || tsd_global_slow()) {
if (malloc_slow || tsd_local_slow(tsd)) {
return tsd_state_nominal_slow;
} else {
return tsd_state_nominal;

View file

@ -126,20 +126,6 @@ p_test_fini(bool skip_test) {
test_status_string(test_status));
}
static void
check_global_slow(test_status_t *status) {
#ifdef JEMALLOC_UNIT_TEST
/*
* This check needs to peek into tsd internals, which is why it's only
* exposed in unit tests.
*/
if (tsd_global_slow()) {
malloc_printf("Testing increased global slow count\n");
*status = test_status_fail;
}
#endif
}
static test_status_t
p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
selected_test_name = getenv("JEMALLOC_TEST_NAME");
@ -168,7 +154,6 @@ p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
if (test_status > ret) {
ret = test_status;
}
check_global_slow(&ret);
/* Reentrant run. */
if (do_reentrant) {
reentrancy = libc_reentrant;
@ -178,7 +163,6 @@ p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
if (test_status > ret) {
ret = test_status;
}
check_global_slow(&ret);
reentrancy = arena_new_reentrant;
test_hooks_libc_hook = NULL;
@ -187,7 +171,6 @@ p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
if (test_status > ret) {
ret = test_status;
}
check_global_slow(&ret);
}
}

View file

@ -1,70 +0,0 @@
#include "test/jemalloc_test.h"
static void
noop_alloc_hook(void *extra, hook_alloc_t type, void *result,
uintptr_t result_raw, uintptr_t args_raw[3]) {}
static void
noop_dalloc_hook(
void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {}
static void
noop_expand_hook(void *extra, hook_expand_t type, void *address,
size_t old_usize, size_t new_usize, uintptr_t result_raw,
uintptr_t args_raw[4]) {}
static void
malloc_free_loop(int iters) {
for (int i = 0; i < iters; i++) {
void *p = mallocx(1, 0);
free(p);
}
}
static void
test_hooked(int iters) {
hooks_t hooks = {
&noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook, NULL};
int err;
void *handles[HOOK_MAX];
size_t sz = sizeof(handles[0]);
for (int i = 0; i < HOOK_MAX; i++) {
err = mallctl("experimental.hooks.install", &handles[i], &sz,
&hooks, sizeof(hooks));
assert(err == 0);
timedelta_t timer;
timer_start(&timer);
malloc_free_loop(iters);
timer_stop(&timer);
malloc_printf("With %d hook%s: %" FMTu64 "us\n", i + 1,
i + 1 == 1 ? "" : "s", timer_usec(&timer));
}
for (int i = 0; i < HOOK_MAX; i++) {
err = mallctl("experimental.hooks.remove", NULL, NULL,
&handles[i], sizeof(handles[i]));
assert(err == 0);
}
}
static void
test_unhooked(int iters) {
timedelta_t timer;
timer_start(&timer);
malloc_free_loop(iters);
timer_stop(&timer);
malloc_printf("Without hooks: %" FMTu64 "us\n", timer_usec(&timer));
}
int
main(void) {
/* Initialize */
free(mallocx(1, 0));
int iters = 10 * 1000 * 1000;
malloc_printf("Benchmarking hooks with %d iterations:\n", iters);
test_hooked(iters);
test_unhooked(iters);
}

View file

@ -1,589 +0,0 @@
#include "test/jemalloc_test.h"
#include "jemalloc/internal/hook.h"
static void *arg_extra;
static int arg_type;
static void *arg_result;
static void *arg_address;
static size_t arg_old_usize;
static size_t arg_new_usize;
static uintptr_t arg_result_raw;
static uintptr_t arg_args_raw[4];
static int call_count = 0;
static void
reset_args(void) {
arg_extra = NULL;
arg_type = 12345;
arg_result = NULL;
arg_address = NULL;
arg_old_usize = 0;
arg_new_usize = 0;
arg_result_raw = 0;
memset(arg_args_raw, 77, sizeof(arg_args_raw));
}
static void
alloc_free_size(size_t sz) {
void *ptr = mallocx(1, 0);
free(ptr);
ptr = mallocx(1, 0);
free(ptr);
ptr = mallocx(1, MALLOCX_TCACHE_NONE);
dallocx(ptr, MALLOCX_TCACHE_NONE);
}
/*
* We want to support a degree of user reentrancy. This tests a variety of
* allocation scenarios.
*/
static void
be_reentrant(void) {
/* Let's make sure the tcache is non-empty if enabled. */
alloc_free_size(1);
alloc_free_size(1024);
alloc_free_size(64 * 1024);
alloc_free_size(256 * 1024);
alloc_free_size(1024 * 1024);
/* Some reallocation. */
void *ptr = mallocx(129, 0);
ptr = rallocx(ptr, 130, 0);
free(ptr);
ptr = mallocx(2 * 1024 * 1024, 0);
free(ptr);
ptr = mallocx(1 * 1024 * 1024, 0);
ptr = rallocx(ptr, 2 * 1024 * 1024, 0);
free(ptr);
ptr = mallocx(1, 0);
ptr = rallocx(ptr, 1000, 0);
free(ptr);
}
static void
set_args_raw(uintptr_t *args_raw, int nargs) {
memcpy(arg_args_raw, args_raw, sizeof(uintptr_t) * nargs);
}
static void
expect_args_raw(uintptr_t *args_raw_expected, int nargs) {
int cmp = memcmp(
args_raw_expected, arg_args_raw, sizeof(uintptr_t) * nargs);
expect_d_eq(cmp, 0, "Raw args mismatch");
}
static void
reset(void) {
call_count = 0;
reset_args();
}
static void
test_alloc_hook(void *extra, hook_alloc_t type, void *result,
uintptr_t result_raw, uintptr_t args_raw[3]) {
call_count++;
arg_extra = extra;
arg_type = (int)type;
arg_result = result;
arg_result_raw = result_raw;
set_args_raw(args_raw, 3);
be_reentrant();
}
static void
test_dalloc_hook(
void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
call_count++;
arg_extra = extra;
arg_type = (int)type;
arg_address = address;
set_args_raw(args_raw, 3);
be_reentrant();
}
static void
test_expand_hook(void *extra, hook_expand_t type, void *address,
size_t old_usize, size_t new_usize, uintptr_t result_raw,
uintptr_t args_raw[4]) {
call_count++;
arg_extra = extra;
arg_type = (int)type;
arg_address = address;
arg_old_usize = old_usize;
arg_new_usize = new_usize;
arg_result_raw = result_raw;
set_args_raw(args_raw, 4);
be_reentrant();
}
TEST_BEGIN(test_hooks_basic) {
/* Just verify that the record their arguments correctly. */
hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, &test_expand_hook,
(void *)111};
void *handle = hook_install(TSDN_NULL, &hooks);
uintptr_t args_raw[4] = {10, 20, 30, 40};
/* Alloc */
reset_args();
hook_invoke_alloc(
hook_alloc_posix_memalign, (void *)222, 333, args_raw);
expect_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer");
expect_d_eq((int)hook_alloc_posix_memalign, arg_type,
"Passed wrong alloc type");
expect_ptr_eq((void *)222, arg_result, "Passed wrong result address");
expect_u64_eq(333, arg_result_raw, "Passed wrong result");
expect_args_raw(args_raw, 3);
/* Dalloc */
reset_args();
hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw);
expect_d_eq(
(int)hook_dalloc_sdallocx, arg_type, "Passed wrong dalloc type");
expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
expect_args_raw(args_raw, 3);
/* Expand */
reset_args();
hook_invoke_expand(
hook_expand_xallocx, (void *)222, 333, 444, 555, args_raw);
expect_d_eq(
(int)hook_expand_xallocx, arg_type, "Passed wrong expand type");
expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
expect_zu_eq(333, arg_old_usize, "Passed wrong old usize");
expect_zu_eq(444, arg_new_usize, "Passed wrong new usize");
expect_zu_eq(555, arg_result_raw, "Passed wrong result");
expect_args_raw(args_raw, 4);
hook_remove(TSDN_NULL, handle);
}
TEST_END
TEST_BEGIN(test_hooks_null) {
/* Null hooks should be ignored, not crash. */
hooks_t hooks1 = {NULL, NULL, NULL, NULL};
hooks_t hooks2 = {&test_alloc_hook, NULL, NULL, NULL};
hooks_t hooks3 = {NULL, &test_dalloc_hook, NULL, NULL};
hooks_t hooks4 = {NULL, NULL, &test_expand_hook, NULL};
void *handle1 = hook_install(TSDN_NULL, &hooks1);
void *handle2 = hook_install(TSDN_NULL, &hooks2);
void *handle3 = hook_install(TSDN_NULL, &hooks3);
void *handle4 = hook_install(TSDN_NULL, &hooks4);
expect_ptr_ne(handle1, NULL, "Hook installation failed");
expect_ptr_ne(handle2, NULL, "Hook installation failed");
expect_ptr_ne(handle3, NULL, "Hook installation failed");
expect_ptr_ne(handle4, NULL, "Hook installation failed");
uintptr_t args_raw[4] = {10, 20, 30, 40};
call_count = 0;
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
expect_d_eq(call_count, 1, "Called wrong number of times");
call_count = 0;
hook_invoke_dalloc(hook_dalloc_free, NULL, args_raw);
expect_d_eq(call_count, 1, "Called wrong number of times");
call_count = 0;
hook_invoke_expand(hook_expand_realloc, NULL, 0, 0, 0, args_raw);
expect_d_eq(call_count, 1, "Called wrong number of times");
hook_remove(TSDN_NULL, handle1);
hook_remove(TSDN_NULL, handle2);
hook_remove(TSDN_NULL, handle3);
hook_remove(TSDN_NULL, handle4);
}
TEST_END
TEST_BEGIN(test_hooks_remove) {
hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL};
void *handle = hook_install(TSDN_NULL, &hooks);
expect_ptr_ne(handle, NULL, "Hook installation failed");
call_count = 0;
uintptr_t args_raw[4] = {10, 20, 30, 40};
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
expect_d_eq(call_count, 1, "Hook not invoked");
call_count = 0;
hook_remove(TSDN_NULL, handle);
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL);
expect_d_eq(call_count, 0, "Hook invoked after removal");
}
TEST_END
TEST_BEGIN(test_hooks_alloc_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
expect_ptr_ne(handle, NULL, "Hook installation failed");
/* Stop malloc from being optimized away. */
volatile int err;
void *volatile ptr;
/* malloc */
reset();
ptr = malloc(1);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
free(ptr);
/* posix_memalign */
reset();
err = posix_memalign((void **)&ptr, 1024, 1);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(
arg_type, (int)hook_alloc_posix_memalign, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq(
(uintptr_t)err, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument");
expect_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument");
free(ptr);
/* aligned_alloc */
reset();
ptr = aligned_alloc(1024, 1);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_aligned_alloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
/* calloc */
reset();
ptr = calloc(11, 13);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument");
free(ptr);
/* memalign */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
reset();
ptr = memalign(1024, 1);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
#endif /* JEMALLOC_OVERRIDE_MEMALIGN */
/* valloc */
#ifdef JEMALLOC_OVERRIDE_VALLOC
reset();
ptr = valloc(1);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
free(ptr);
#endif /* JEMALLOC_OVERRIDE_VALLOC */
/* pvalloc */
#ifdef JEMALLOC_OVERRIDE_PVALLOC
reset();
ptr = pvalloc(1);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_pvalloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
free(ptr);
#endif /* JEMALLOC_OVERRIDE_PVALLOC */
/* mallocx */
reset();
ptr = mallocx(1, MALLOCX_LG_ALIGN(10));
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
expect_u64_eq(
(uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1], "Wrong flags");
free(ptr);
hook_remove(TSDN_NULL, handle);
}
TEST_END
TEST_BEGIN(test_hooks_dalloc_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
/* free() */
reset();
ptr = malloc(1);
free(ptr);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
/* dallocx() */
reset();
ptr = malloc(1);
dallocx(ptr, MALLOCX_TCACHE_NONE);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
expect_u64_eq(
(uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1], "Wrong raw arg");
/* sdallocx() */
reset();
ptr = malloc(1);
sdallocx(ptr, 1, MALLOCX_TCACHE_NONE);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
expect_u64_eq(
(uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2], "Wrong raw arg");
hook_remove(TSDN_NULL, handle);
}
TEST_END
TEST_BEGIN(test_hooks_expand_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
/* xallocx() */
reset();
ptr = malloc(1);
size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
expect_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
expect_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
expect_u64_eq(new_usize, arg_result_raw, "Wrong result");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
expect_u64_eq(100, arg_args_raw[1], "Wrong arg");
expect_u64_eq(200, arg_args_raw[2], "Wrong arg");
expect_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");
hook_remove(TSDN_NULL, handle);
}
TEST_END
TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) {
hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, &test_expand_hook,
(void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
/* realloc(NULL, size) as malloc */
reset();
ptr = realloc(NULL, 1);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
/* realloc(ptr, 0) as free */
if (opt_zero_realloc_action == zero_realloc_action_free) {
ptr = malloc(1);
reset();
realloc(ptr, 0);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(
arg_type, (int)hook_dalloc_realloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
expect_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong raw arg");
}
/* realloc(NULL, 0) as malloc(0) */
reset();
ptr = realloc(NULL, 0);
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
expect_ptr_eq(ptr, arg_result, "Wrong result");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument");
free(ptr);
hook_remove(TSDN_NULL, handle);
}
TEST_END
static void
do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
int expand_type, int dalloc_type) {
hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, &test_expand_hook,
(void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
void *volatile ptr2;
/* Realloc in-place, small. */
ptr = malloc(129);
reset();
ptr2 = ralloc(ptr, 130, flags);
expect_ptr_eq(ptr, ptr2, "Small realloc moved");
expect_d_eq(call_count, 1, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, expand_type, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong address");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument");
free(ptr);
/*
* Realloc in-place, large. Since we can't guarantee the large case
* across all platforms, we stay resilient to moving results.
*/
ptr = malloc(2 * 1024 * 1024);
free(ptr);
ptr2 = malloc(1 * 1024 * 1024);
reset();
ptr = ralloc(ptr2, 2 * 1024 * 1024, flags);
/* ptr is the new address, ptr2 is the old address. */
if (ptr == ptr2) {
expect_d_eq(call_count, 1, "Hook not called");
expect_d_eq(arg_type, expand_type, "Wrong hook type");
} else {
expect_d_eq(call_count, 2, "Wrong hooks called");
expect_ptr_eq(ptr, arg_result, "Wrong address");
expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
}
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_ptr_eq(ptr2, arg_address, "Wrong address");
expect_u64_eq(
(uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument");
expect_u64_eq(
(uintptr_t)2 * 1024 * 1024, arg_args_raw[1], "Wrong argument");
free(ptr);
/* Realloc with move, small. */
ptr = malloc(8);
reset();
ptr2 = ralloc(ptr, 128, flags);
expect_ptr_ne(ptr, ptr2, "Small realloc didn't move");
expect_d_eq(call_count, 2, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong address");
expect_ptr_eq(ptr2, arg_result, "Wrong address");
expect_u64_eq(
(uintptr_t)ptr2, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
expect_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument");
free(ptr2);
/* Realloc with move, large. */
ptr = malloc(1);
reset();
ptr2 = ralloc(ptr, 2 * 1024 * 1024, flags);
expect_ptr_ne(ptr, ptr2, "Large realloc didn't move");
expect_d_eq(call_count, 2, "Hook not called");
expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
expect_ptr_eq(ptr, arg_address, "Wrong address");
expect_ptr_eq(ptr2, arg_result, "Wrong address");
expect_u64_eq(
(uintptr_t)ptr2, (uintptr_t)arg_result_raw, "Wrong raw result");
expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
expect_u64_eq(
(uintptr_t)2 * 1024 * 1024, arg_args_raw[1], "Wrong argument");
free(ptr2);
hook_remove(TSDN_NULL, handle);
}
static void *
realloc_wrapper(void *ptr, size_t size, UNUSED int flags) {
return realloc(ptr, size);
}
TEST_BEGIN(test_hooks_realloc) {
do_realloc_test(
&realloc_wrapper, 0, hook_expand_realloc, hook_dalloc_realloc);
}
TEST_END
TEST_BEGIN(test_hooks_rallocx) {
do_realloc_test(&rallocx, MALLOCX_TCACHE_NONE, hook_expand_rallocx,
hook_dalloc_rallocx);
}
TEST_END
int
main(void) {
/* We assert on call counts. */
return test_no_reentrancy(test_hooks_basic, test_hooks_null,
test_hooks_remove, test_hooks_alloc_simple,
test_hooks_dalloc_simple, test_hooks_expand_simple,
test_hooks_realloc_as_malloc_or_free, test_hooks_realloc,
test_hooks_rallocx);
}

View file

@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/util.h"
TEST_BEGIN(test_mallctl_errors) {
@ -1216,79 +1215,6 @@ TEST_BEGIN(test_stats_arenas_hpa_shard_slabs) {
}
TEST_END
static void
alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result,
UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) {
*(bool *)extra = true;
}
static void
dalloc_hook(void *extra, UNUSED hook_dalloc_t type, UNUSED void *address,
UNUSED uintptr_t args_raw[3]) {
*(bool *)extra = true;
}
TEST_BEGIN(test_hooks) {
bool hook_called = false;
hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
void *handle = NULL;
size_t sz = sizeof(handle);
int err = mallctl(
"experimental.hooks.install", &handle, &sz, &hooks, sizeof(hooks));
expect_d_eq(err, 0, "Hook installation failed");
expect_ptr_ne(handle, NULL, "Hook installation gave null handle");
void *ptr = mallocx(1, 0);
expect_true(hook_called, "Alloc hook not called");
hook_called = false;
free(ptr);
expect_true(hook_called, "Free hook not called");
err = mallctl(
"experimental.hooks.remove", NULL, NULL, &handle, sizeof(handle));
expect_d_eq(err, 0, "Hook removal failed");
hook_called = false;
ptr = mallocx(1, 0);
free(ptr);
expect_false(hook_called, "Hook called after removal");
}
TEST_END
TEST_BEGIN(test_hooks_exhaustion) {
bool hook_called = false;
hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
void *handle;
void *handles[HOOK_MAX];
size_t sz = sizeof(handle);
int err;
for (int i = 0; i < HOOK_MAX; i++) {
handle = NULL;
err = mallctl("experimental.hooks.install", &handle, &sz,
&hooks, sizeof(hooks));
expect_d_eq(err, 0, "Error installation hooks");
expect_ptr_ne(handle, NULL, "Got NULL handle");
handles[i] = handle;
}
err = mallctl(
"experimental.hooks.install", &handle, &sz, &hooks, sizeof(hooks));
expect_d_eq(err, EAGAIN, "Should have failed hook installation");
for (int i = 0; i < HOOK_MAX; i++) {
err = mallctl("experimental.hooks.remove", NULL, NULL,
&handles[i], sizeof(handles[i]));
expect_d_eq(err, 0, "Hook removal failed");
}
/* Insertion failed, but then we removed some; it should work now. */
handle = NULL;
err = mallctl(
"experimental.hooks.install", &handle, &sz, &hooks, sizeof(hooks));
expect_d_eq(err, 0, "Hook insertion failed");
expect_ptr_ne(handle, NULL, "Got NULL handle");
err = mallctl(
"experimental.hooks.remove", NULL, NULL, &handle, sizeof(handle));
expect_d_eq(err, 0, "Hook removal failed");
}
TEST_END
TEST_BEGIN(test_thread_idle) {
/*
* We're cheating a little bit in this test, and inferring things about
@ -1483,7 +1409,6 @@ main(void) {
test_arenas_lextent_constants, test_arenas_create,
test_arenas_lookup, test_prof_active, test_stats_arenas,
test_stats_arenas_hpa_shard_counters,
test_stats_arenas_hpa_shard_slabs, test_hooks,
test_hooks_exhaustion, test_thread_idle, test_thread_peak,
test_stats_arenas_hpa_shard_slabs, test_thread_idle, test_thread_peak,
test_thread_event_hook);
}

View file

@ -1,94 +0,0 @@
#include "test/jemalloc_test.h"
#include "jemalloc/internal/seq.h"
typedef struct data_s data_t;
struct data_s {
int arr[10];
};
static void
set_data(data_t *data, int num) {
for (int i = 0; i < 10; i++) {
data->arr[i] = num;
}
}
static void
expect_data(data_t *data) {
int num = data->arr[0];
for (int i = 0; i < 10; i++) {
expect_d_eq(num, data->arr[i], "Data consistency error");
}
}
seq_define(data_t, data)
typedef struct thd_data_s thd_data_t;
struct thd_data_s {
seq_data_t data;
};
static void *
seq_reader_thd(void *arg) {
thd_data_t *thd_data = (thd_data_t *)arg;
int iter = 0;
data_t local_data;
while (iter < 1000 * 1000 - 1) {
bool success = seq_try_load_data(&local_data, &thd_data->data);
if (success) {
expect_data(&local_data);
expect_d_le(iter, local_data.arr[0],
"Seq read went back in time.");
iter = local_data.arr[0];
}
}
return NULL;
}
static void *
seq_writer_thd(void *arg) {
thd_data_t *thd_data = (thd_data_t *)arg;
data_t local_data;
memset(&local_data, 0, sizeof(local_data));
for (int i = 0; i < 1000 * 1000; i++) {
set_data(&local_data, i);
seq_store_data(&thd_data->data, &local_data);
}
return NULL;
}
TEST_BEGIN(test_seq_threaded) {
thd_data_t thd_data;
memset(&thd_data, 0, sizeof(thd_data));
thd_t reader;
thd_t writer;
thd_create(&reader, seq_reader_thd, &thd_data);
thd_create(&writer, seq_writer_thd, &thd_data);
thd_join(reader, NULL);
thd_join(writer, NULL);
}
TEST_END
TEST_BEGIN(test_seq_simple) {
data_t data;
seq_data_t seq;
memset(&seq, 0, sizeof(seq));
for (int i = 0; i < 1000 * 1000; i++) {
set_data(&data, i);
seq_store_data(&seq, &data);
set_data(&data, 0);
bool success = seq_try_load_data(&data, &seq);
expect_b_eq(success, true, "Failed non-racing read");
expect_data(&data);
}
}
TEST_END
int
main(void) {
return test_no_reentrancy(test_seq_simple, test_seq_threaded);
}

View file

@ -1,10 +1,5 @@
#include "test/jemalloc_test.h"
/*
* If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
* be asserting that we're on one.
*/
static bool originally_fast;
static int data_cleanup_count;
void
@ -190,128 +185,6 @@ TEST_BEGIN(test_tsd_sub_thread_dalloc_only) {
}
TEST_END
typedef struct {
atomic_u32_t phase;
atomic_b_t error;
} global_slow_data_t;
static void *
thd_start_global_slow(void *arg) {
/* PHASE 0 */
global_slow_data_t *data = (global_slow_data_t *)arg;
free(mallocx(1, 0));
tsd_t *tsd = tsd_fetch();
/*
* No global slowness has happened yet; there was an error if we were
* originally fast but aren't now.
*/
atomic_store_b(
&data->error, originally_fast && !tsd_fast(tsd), ATOMIC_SEQ_CST);
atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);
/* PHASE 2 */
while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
}
free(mallocx(1, 0));
atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);
/* PHASE 4 */
while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
}
free(mallocx(1, 0));
atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);
/* PHASE 6 */
while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
}
free(mallocx(1, 0));
/* Only one decrement so far. */
atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);
/* PHASE 8 */
while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
}
free(mallocx(1, 0));
/*
* Both decrements happened; we should be fast again (if we ever
* were)
*/
atomic_store_b(
&data->error, originally_fast && !tsd_fast(tsd), ATOMIC_SEQ_CST);
atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);
return NULL;
}
TEST_BEGIN(test_tsd_global_slow) {
global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
/*
* Note that the "mallocx" here (vs. malloc) is important, since the
* compiler is allowed to optimize away free(malloc(1)) but not
* free(mallocx(1)).
*/
free(mallocx(1, 0));
tsd_t *tsd = tsd_fetch();
originally_fast = tsd_fast(tsd);
thd_t thd;
thd_create(&thd, thd_start_global_slow, (void *)&data.phase);
/* PHASE 1 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
/*
* We don't have a portable condvar/semaphore mechanism.
* Spin-wait.
*/
}
expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_inc(tsd_tsdn(tsd));
free(mallocx(1, 0));
expect_false(tsd_fast(tsd), "");
atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
/* PHASE 3 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
}
expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
/* Increase again, so that we can test multiple fast/slow changes. */
tsd_global_slow_inc(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
free(mallocx(1, 0));
expect_false(tsd_fast(tsd), "");
/* PHASE 5 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
}
expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_dec(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
/* We only decreased once; things should still be slow. */
free(mallocx(1, 0));
expect_false(tsd_fast(tsd), "");
/* PHASE 7 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
}
expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_dec(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
/* We incremented and then decremented twice; we should be fast now. */
free(mallocx(1, 0));
expect_true(!originally_fast || tsd_fast(tsd), "");
/* PHASE 9 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
}
expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
thd_join(thd, NULL);
}
TEST_END
int
main(void) {
/* Ensure tsd bootstrapped. */
@ -321,6 +194,5 @@ main(void) {
}
return test_no_reentrancy(test_tsd_main_thread, test_tsd_sub_thread,
test_tsd_sub_thread_dalloc_only, test_tsd_reincarnation,
test_tsd_global_slow);
test_tsd_sub_thread_dalloc_only, test_tsd_reincarnation);
}