Remove experimental.thread.activity_callback

This commit is contained in:
Slobodan Predolac 2026-03-31 18:12:23 -07:00 committed by Guangli Dai
parent 19bbefe136
commit 176ea0a801
5 changed files with 3 additions and 148 deletions

View file

@@ -1,26 +0,0 @@
#ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
#define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
#include "jemalloc/internal/jemalloc_preamble.h"
/*
 * The callback to be executed "periodically", in response to some amount of
 * allocator activity.
 *
 * This callback need not be computing any sort of peak (although that's the
 * intended first use case), but we drive it from the peak counter, so it
 * keeps things tidy to keep it here.
 *
 * The calls to this thunk get driven by the peak_event module.
 */
/* An empty (disabled) thunk: no callback installed, no user context. */
#define ACTIVITY_CALLBACK_THUNK_INITIALIZER \
{ NULL, NULL }
/*
 * Signature of the user callback: receives the opaque user context plus the
 * calling thread's cumulative allocated and deallocated byte counts.
 */
typedef void (*activity_callback_t)(
void *uctx, uint64_t allocated, uint64_t deallocated);
typedef struct activity_callback_thunk_s activity_callback_thunk_t;
/* Pairs the callback with the opaque context pointer handed back to it. */
struct activity_callback_thunk_s {
activity_callback_t callback;
void *uctx;
};
#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */

View file

@@ -4,7 +4,6 @@
#define JEMALLOC_INTERNAL_TSD_INTERNALS_H
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/activity_callback.h"
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin_types.h"
@@ -84,8 +83,6 @@ typedef ql_elm(tsd_t) tsd_link_t;
O(tsd_link, tsd_link_t, tsd_link_t) \
O(in_hook, bool, bool) \
O(peak, peak_t, peak_t) \
O(activity_callback_thunk, activity_callback_thunk_t, \
activity_callback_thunk_t) \
O(tcache_slow, tcache_slow_t, tcache_slow_t) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t)
@@ -105,8 +102,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
/* sec_shard */ (uint8_t) - 1, \
/* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \
/* tsd_link */ {NULL}, /* in_hook */ false, \
/* peak */ PEAK_INITIALIZER, /* activity_callback_thunk */ \
ACTIVITY_CALLBACK_THUNK_INITIALIZER, \
/* peak */ PEAK_INITIALIZER, \
/* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \
/* rtree_ctx */ RTREE_CTX_INITIALIZER,

View file

@@ -365,7 +365,6 @@ CTL_PROTO(experimental_hooks_prof_sample)
CTL_PROTO(experimental_hooks_prof_sample_free)
CTL_PROTO(experimental_hooks_thread_event)
CTL_PROTO(experimental_hooks_safety_check_abort)
CTL_PROTO(experimental_thread_activity_callback)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
@@ -890,9 +889,6 @@ static const ctl_named_node_t experimental_hooks_node[] = {
{NAME("thread_event"), CTL(experimental_hooks_thread_event)},
};
/* Children of the experimental.thread.* mallctl namespace. */
static const ctl_named_node_t experimental_thread_node[] = {
{NAME("activity_callback"), CTL(experimental_thread_activity_callback)}};
static const ctl_named_node_t experimental_utilization_node[] = {
{NAME("query"), CTL(experimental_utilization_query)},
{NAME("batch_query"), CTL(experimental_utilization_batch_query)}};
@@ -916,8 +912,7 @@ static const ctl_named_node_t experimental_node[] = {
{NAME("arenas"), CHILD(indexed, experimental_arenas)},
{NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)},
{NAME("prof_recent"), CHILD(named, experimental_prof_recent)},
{NAME("batch_alloc"), CTL(experimental_batch_alloc)},
{NAME("thread"), CHILD(named, experimental_thread)}};
{NAME("batch_alloc"), CTL(experimental_batch_alloc)}};
static const ctl_named_node_t root_node[] = {{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
@@ -4255,32 +4250,6 @@ label_return:
return ret;
}
/*
 * Handler for the experimental.thread.activity_callback mallctl: reports the
 * thread's current activity-callback thunk through oldp, and installs a new
 * thunk when newp is supplied.  Returns 0 on success, or ENOENT when stats
 * support is compiled out (the callback is driven by stats counters).
 * NOTE(review): the READ/WRITE macros presumably set ret and jump to
 * label_return on size-mismatch errors, per the usual ctl convention —
 * confirm against their definitions.
 */
static int
experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (!config_stats) {
return ENOENT;
}
/* Report the currently-installed thunk before any replacement. */
activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
READ(t_old, activity_callback_thunk_t);
if (newp != NULL) {
/*
 * This initialization is unnecessary. If it's omitted, though,
 * clang gets confused and warns on the subsequent use of t_new.
 */
activity_callback_thunk_t t_new = {NULL, NULL};
WRITE(t_new, activity_callback_thunk_t);
tsd_activity_callback_thunk_set(tsd, t_new);
}
ret = 0;
label_return:
return ret;
}
/*
* Output six memory utilization entries for an input pointer, the first one of
* type (void *) and the remaining five of type size_t, describing the following

View file

@@ -3,7 +3,6 @@
#include "jemalloc/internal/peak_event.h"
#include "jemalloc/internal/activity_callback.h"
#include "jemalloc/internal/peak.h"
#include "jemalloc/internal/thread_event_registry.h"
@@ -16,17 +15,6 @@ peak_event_update(tsd_t *tsd) {
peak_update(peak, alloc, dalloc);
}
/*
 * Fire the thread's installed activity callback, if any, passing it the
 * thread's cumulative allocated/deallocated byte counters from TSD.
 */
static void
peak_event_activity_callback(tsd_t *tsd) {
	activity_callback_thunk_t *t = tsd_activity_callback_thunkp_get(tsd);
	uint64_t allocated = tsd_thread_allocated_get(tsd);
	uint64_t deallocated = tsd_thread_deallocated_get(tsd);
	if (t->callback == NULL) {
		/* No callback installed; nothing to do. */
		return;
	}
	t->callback(t->uctx, allocated, deallocated);
}
/* Set current state to zero. */
void
peak_event_zero(tsd_t *tsd) {
@@ -55,7 +43,6 @@ peak_event_postponed_event_wait(tsd_t *tsd) {
/* Event handler: refresh the peak counter, then drive the activity callback. */
static void
peak_event_handler(tsd_t *tsd) {
peak_event_update(tsd);
peak_event_activity_callback(tsd);
}
static te_enabled_t

View file

@@ -1332,77 +1332,6 @@ TEST_BEGIN(test_thread_peak) {
}
TEST_END
/* Records the counter values most recently delivered to the test callback. */
typedef struct activity_test_data_s activity_test_data_t;
struct activity_test_data_s {
	uint64_t obtained_alloc;
	uint64_t obtained_dalloc;
};

/* Test activity callback: stash the reported counters into *uctx. */
static void
activity_test_callback(void *uctx, uint64_t alloc, uint64_t dalloc) {
	activity_test_data_t *data = uctx;
	data->obtained_alloc = alloc;
	data->obtained_dalloc = dalloc;
}
/* End-to-end test of the experimental.thread.activity_callback mallctl. */
TEST_BEGIN(test_thread_activity_callback) {
test_skip_if(!config_stats);
/* Large enough that the allocation drives a peak/activity event. */
const size_t big_size = 10 * 1024 * 1024;
void *ptr;
int err;
size_t sz;
uint64_t *allocatedp;
uint64_t *deallocatedp;
/* Fetch pointers to this thread's cumulative alloc/dealloc counters. */
sz = sizeof(allocatedp);
err = mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
assert_d_eq(0, err, "");
err = mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0);
assert_d_eq(0, err, "");
/*
 * Seed old_thunk and test_data with garbage sentinels so we can tell
 * that the mallctl read-back and the callback actually overwrote them.
 */
activity_callback_thunk_t old_thunk = {
(activity_callback_t)111, (void *)222};
activity_test_data_t test_data = {333, 444};
activity_callback_thunk_t new_thunk = {
&activity_test_callback, &test_data};
/* Install the test callback; the previously-installed thunk is empty. */
sz = sizeof(old_thunk);
err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
&new_thunk, sizeof(new_thunk));
assert_d_eq(0, err, "");
expect_true(old_thunk.callback == NULL, "Callback already installed");
expect_true(old_thunk.uctx == NULL, "Callback data already installed");
/* Both the allocation and the free should have driven the callback. */
ptr = mallocx(big_size, 0);
expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
free(ptr);
expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
/* Uninstall via a NULL thunk; we should read back the one we set. */
sz = sizeof(old_thunk);
new_thunk = (activity_callback_thunk_t){NULL, NULL};
err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
&new_thunk, sizeof(new_thunk));
assert_d_eq(0, err, "");
expect_true(old_thunk.callback == &activity_test_callback, "");
expect_true(old_thunk.uctx == &test_data, "");
/* Inserting NULL should have turned off tracking. */
test_data.obtained_alloc = 333;
test_data.obtained_dalloc = 444;
ptr = mallocx(big_size, 0);
free(ptr);
expect_u64_eq(333, test_data.obtained_alloc, "");
expect_u64_eq(444, test_data.obtained_dalloc, "");
}
TEST_END
static unsigned nuser_thread_event_cb_calls;
static void
user_thread_event_cb(bool is_alloc, uint64_t tallocated, uint64_t tdallocated) {
@@ -1455,5 +1384,5 @@ main(void) {
test_stats_arenas_hpa_shard_counters,
test_stats_arenas_hpa_shard_slabs, test_hooks,
test_hooks_exhaustion, test_thread_idle, test_thread_peak,
test_thread_activity_callback, test_thread_event_hook);
test_thread_event_hook);
}