Compare commits

...

20 commits

Author SHA1 Message Date
Slobodan Predolac
f265645d02 Emit retained HPA slab stats in JSON 2026-04-01 23:15:19 -04:00
Slobodan Predolac
db7d99703d Add TODO to benchmark possibly better policy 2026-04-01 23:15:19 -04:00
Slobodan Predolac
6281482c39 Nest HPA SEC stats inside hpa_shard JSON 2026-04-01 23:15:19 -04:00
Slobodan Predolac
3cc56d325c Fix large alloc nrequests under-counting on cache misses 2026-04-01 23:15:19 -04:00
Slobodan Predolac
a47fa33b5a Run clang-format on test/unit/tcache_max.c 2026-04-01 23:15:19 -04:00
Slobodan Predolac
b507644cb0 Fix conf_handle_char_p zero-sized dest and remove unused conf_handle_unsigned 2026-04-01 23:15:19 -04:00
Slobodan Predolac
3ac9f96158 Run clang-format on test/unit/conf_parse.c 2026-04-01 23:15:19 -04:00
Slobodan Predolac
5904a42187 Fix memory leak of old curr_reg on san_bump_grow_locked failure
When san_bump_grow_locked fails, it sets sba->curr_reg to NULL.
The old curr_reg (saved in to_destroy) was never freed or restored,
leaking the virtual memory extent. Restore sba->curr_reg from
to_destroy on failure so the old region remains usable.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
2fceece256 Fix extra size argument in edata_init call in extent_alloc_dss
An extra 'size' argument was passed where 'slab' (false) should be,
shifting all subsequent arguments: slab got size (nonzero=true),
szind got false (0), and sn got SC_NSIZES instead of a proper serial
number from extent_sn_next(). Match the correct pattern used by the
gap edata_init call above.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
234404d324 Fix wrong loop variable for array index in sz_boot_pind2sz_tab
The sentinel fill loop used sz_pind2sz_tab[pind] (constant) instead
of sz_pind2sz_tab[i] (loop variable), writing only to the first
entry repeatedly and leaving subsequent entries uninitialized.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
675ab079e7 Fix missing release of acquired neighbor edata in extent_try_coalesce_impl
When emap_try_acquire_edata_neighbor returned a non-NULL neighbor but
the size check failed, the neighbor was never released from
extent_state_merging, making it permanently invisible to future
allocation and coalescing operations.

Release the neighbor when it doesn't meet the size requirement,
matching the pattern used in extent_recycle_extract.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
3f6e63e86a Fix wrong type for malloc_read_fd return value in prof_stack_range
Used size_t (unsigned) instead of ssize_t for the return value of
malloc_read_fd, which returns -1 on error. With size_t, -1 becomes
a huge positive value, bypassing the error check and corrupting the
remaining byte count.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
dd30c91eaa Fix wrong fallback value in os_page_detect when sysconf fails
Returned LG_PAGE (log2 of page size, e.g. 12) instead of PAGE (actual
page size, e.g. 4096) when sysconf(_SC_PAGESIZE) failed. This would
cause os_page to be set to an absurdly small value, breaking all
page-aligned operations.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
3a8bee81f1 Fix pac_mapped stats inflation on allocation failure
newly_mapped_size was set unconditionally in the ecache_alloc_grow
fallback path, even when the allocation returned NULL. This inflated
pac_mapped stats without a corresponding deallocation to correct them.

Guard the assignment with an edata != NULL check, matching the pattern
used in the batched allocation path above it.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
c2d57040f0 Fix out-of-bounds write in malloc_vsnprintf when size is 0
When called with size==0, the else branch wrote to str[size-1] which
is str[(size_t)-1], a massive out-of-bounds write. Standard vsnprintf
allows size==0 to mean "compute length only, write nothing".

Add unit test for the size==0 case.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
eab2b29736 Fix off-by-one in stats_arenas_i_bins_j and stats_arenas_i_lextents_j bounds checks
Same pattern as arenas_bin_i_index: used > instead of >=, allowing
access one past the end of the bstats[] and lstats[] arrays.

Add unit tests that verify boundary indices return ENOENT.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
a0f2bdf91d Fix missing negation in large_ralloc_no_move usize_min fallback
The second expansion attempt in large_ralloc_no_move omitted the !
before large_ralloc_no_move_expand(), inverting the return value.
On expansion failure, the function falsely reported success, making
callers believe the allocation was expanded in-place when it was not.
On expansion success, the function falsely reported failure, causing
callers to unnecessarily allocate, copy, and free.

Add unit test that verifies the return value matches actual size change.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
87f9938de5 Fix duplicate "nactive_huge" JSON key in HPA shard stats output
In both the full_slabs and empty_slabs JSON sections of HPA shard
stats, "nactive_huge" was emitted twice instead of emitting
"ndirty_huge" as the second entry. This caused ndirty_huge to be
missing from the JSON output entirely.

Add a unit test that verifies both sections contain "ndirty_huge".
2026-04-01 23:15:19 -04:00
Slobodan Predolac
513778bcb1 Fix off-by-one in arenas_bin_i_index and arenas_lextent_i_index bounds checks
The index validation used > instead of >=, allowing access at index
SC_NBINS (for bins) and SC_NSIZES-SC_NBINS (for lextents), which are
one past the valid range. This caused out-of-bounds reads in bin_infos[]
and sz_index2size_unsafe().

Add unit tests that verify the boundary indices return ENOENT.
2026-04-01 23:15:19 -04:00
Slobodan Predolac
176ea0a801 Remove experimental.thread.activity_callback 2026-04-01 16:23:41 -07:00
28 changed files with 777 additions and 312 deletions

View file

@ -248,6 +248,7 @@ TESTS_UNIT := \
$(srcroot)test/unit/junk_alloc.c \
$(srcroot)test/unit/junk_free.c \
$(srcroot)test/unit/json_stats.c \
$(srcroot)test/unit/large_ralloc.c \
$(srcroot)test/unit/log.c \
$(srcroot)test/unit/mallctl.c \
$(srcroot)test/unit/malloc_conf_2.c \

View file

@ -1,26 +0,0 @@
#ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
#define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
#include "jemalloc/internal/jemalloc_preamble.h"
/*
* The callback to be executed "periodically", in response to some amount of
* allocator activity.
*
* This callback need not be computing any sort of peak (although that's the
* intended first use case), but we drive it from the peak counter, so it
* keeps things tidy to keep it here.
*
* The calls to this thunk get driven by the peak_event module.
*/
#define ACTIVITY_CALLBACK_THUNK_INITIALIZER \
{ NULL, NULL }
typedef void (*activity_callback_t)(
void *uctx, uint64_t allocated, uint64_t deallocated);
typedef struct activity_callback_thunk_s activity_callback_thunk_t;
struct activity_callback_thunk_s {
activity_callback_t callback;
void *uctx;
};
#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */

View file

@ -9,19 +9,15 @@ void malloc_abort_invalid_conf(void);
#ifdef JEMALLOC_JET
extern bool had_conf_error;
bool conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
char const **v_p, size_t *vlen_p);
void conf_error(const char *msg, const char *k, size_t klen,
const char *v, size_t vlen);
void conf_error(
const char *msg, const char *k, size_t klen, const char *v, size_t vlen);
bool conf_handle_bool(const char *v, size_t vlen, bool *result);
bool conf_handle_unsigned(const char *v, size_t vlen,
uintmax_t min, uintmax_t max, bool check_min, bool check_max,
bool clip, uintmax_t *result);
bool conf_handle_signed(const char *v, size_t vlen,
intmax_t min, intmax_t max, bool check_min, bool check_max,
bool clip, intmax_t *result);
bool conf_handle_char_p(const char *v, size_t vlen,
char *dest, size_t dest_sz);
bool conf_handle_signed(const char *v, size_t vlen, intmax_t min, intmax_t max,
bool check_min, bool check_max, bool clip, intmax_t *result);
bool conf_handle_char_p(const char *v, size_t vlen, char *dest, size_t dest_sz);
#endif
#endif /* JEMALLOC_INTERNAL_CONF_H */

View file

@ -163,10 +163,10 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
assert(usize <= tcache_max_get(tcache->tcache_slow));
memset(ret, 0, usize);
}
}
if (config_stats) {
bin->tstats.nrequests++;
}
if (config_stats) {
bin->tstats.nrequests++;
}
return ret;

View file

@ -4,7 +4,6 @@
#define JEMALLOC_INTERNAL_TSD_INTERNALS_H
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/activity_callback.h"
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin_types.h"
@ -84,8 +83,6 @@ typedef ql_elm(tsd_t) tsd_link_t;
O(tsd_link, tsd_link_t, tsd_link_t) \
O(in_hook, bool, bool) \
O(peak, peak_t, peak_t) \
O(activity_callback_thunk, activity_callback_thunk_t, \
activity_callback_thunk_t) \
O(tcache_slow, tcache_slow_t, tcache_slow_t) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t)
@ -105,8 +102,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
/* sec_shard */ (uint8_t) - 1, \
/* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \
/* tsd_link */ {NULL}, /* in_hook */ false, \
/* peak */ PEAK_INITIALIZER, /* activity_callback_thunk */ \
ACTIVITY_CALLBACK_THUNK_INITIALIZER, \
/* peak */ PEAK_INITIALIZER, \
/* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \
/* rtree_ctx */ RTREE_CTX_INITIALIZER,

View file

@ -20,6 +20,9 @@
*/
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/* Number of elements in a fixed-size array. */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
/* cpp macro definition stringification. */
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

View file

@ -254,36 +254,8 @@ JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-function")
JET_EXTERN bool
conf_handle_unsigned(const char *v, size_t vlen,
uintmax_t min, uintmax_t max, bool check_min, bool check_max,
bool clip, uintmax_t *result) {
char *end;
set_errno(0);
uintmax_t mv = (uintmax_t)malloc_strtoumax(v, &end, 0);
if (get_errno() != 0 || (uintptr_t)end - (uintptr_t)v != vlen) {
return true;
}
if (clip) {
if (check_min && mv < min) {
*result = min;
} else if (check_max && mv > max) {
*result = max;
} else {
*result = mv;
}
} else {
if ((check_min && mv < min) || (check_max && mv > max)) {
return true;
}
*result = mv;
}
return false;
}
JET_EXTERN bool
conf_handle_signed(const char *v, size_t vlen,
intmax_t min, intmax_t max, bool check_min, bool check_max,
bool clip, intmax_t *result) {
conf_handle_signed(const char *v, size_t vlen, intmax_t min, intmax_t max,
bool check_min, bool check_max, bool clip, intmax_t *result) {
char *end;
set_errno(0);
intmax_t mv = (intmax_t)malloc_strtoumax(v, &end, 0);
@ -309,6 +281,9 @@ conf_handle_signed(const char *v, size_t vlen,
JET_EXTERN bool
conf_handle_char_p(const char *v, size_t vlen, char *dest, size_t dest_sz) {
if (dest_sz == 0) {
return false;
}
size_t cpylen = (vlen <= dest_sz - 1) ? vlen : dest_sz - 1;
strncpy(dest, v, cpylen);
dest[cpylen] = '\0';
@ -473,11 +448,11 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
continue;
}
while (*opts != '\0'
&& !conf_next(&opts, &k, &klen, &v, &vlen)) {
while (
*opts != '\0' && !conf_next(&opts, &k, &klen, &v, &vlen)) {
#define CONF_ERROR(msg, k, klen, v, vlen) \
if (!initial_call) { \
conf_error(msg, k, klen, v, vlen); \
conf_error(msg, k, klen, v, vlen); \
cur_opt_valid = false; \
}
#define CONF_CONTINUE \

View file

@ -365,7 +365,6 @@ CTL_PROTO(experimental_hooks_prof_sample)
CTL_PROTO(experimental_hooks_prof_sample_free)
CTL_PROTO(experimental_hooks_thread_event)
CTL_PROTO(experimental_hooks_safety_check_abort)
CTL_PROTO(experimental_thread_activity_callback)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
@ -890,9 +889,6 @@ static const ctl_named_node_t experimental_hooks_node[] = {
{NAME("thread_event"), CTL(experimental_hooks_thread_event)},
};
static const ctl_named_node_t experimental_thread_node[] = {
{NAME("activity_callback"), CTL(experimental_thread_activity_callback)}};
static const ctl_named_node_t experimental_utilization_node[] = {
{NAME("query"), CTL(experimental_utilization_query)},
{NAME("batch_query"), CTL(experimental_utilization_batch_query)}};
@ -916,8 +912,7 @@ static const ctl_named_node_t experimental_node[] = {
{NAME("arenas"), CHILD(indexed, experimental_arenas)},
{NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)},
{NAME("prof_recent"), CHILD(named, experimental_prof_recent)},
{NAME("batch_alloc"), CTL(experimental_batch_alloc)},
{NAME("thread"), CHILD(named, experimental_thread)}};
{NAME("batch_alloc"), CTL(experimental_batch_alloc)}};
static const ctl_named_node_t root_node[] = {{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
@ -3255,7 +3250,7 @@ CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t)
static const ctl_named_node_t *
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
if (i > SC_NBINS) {
if (i >= SC_NBINS) {
return NULL;
}
return super_arenas_bin_i_node;
@ -3267,7 +3262,7 @@ CTL_RO_NL_GEN(arenas_lextent_i_size,
static const ctl_named_node_t *
arenas_lextent_i_index(
tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
if (i > SC_NSIZES - SC_NBINS) {
if (i >= SC_NSIZES - SC_NBINS) {
return NULL;
}
return super_arenas_lextent_i_node;
@ -4003,7 +3998,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(
tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) {
if (j > SC_NBINS) {
if (j >= SC_NBINS) {
return NULL;
}
return super_stats_arenas_i_bins_j_node;
@ -4027,7 +4022,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(
tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) {
if (j > SC_NSIZES - SC_NBINS) {
if (j >= SC_NSIZES - SC_NBINS) {
return NULL;
}
return super_stats_arenas_i_lextents_j_node;
@ -4255,32 +4250,6 @@ label_return:
return ret;
}
static int
experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (!config_stats) {
return ENOENT;
}
activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
READ(t_old, activity_callback_thunk_t);
if (newp != NULL) {
/*
* This initialization is unnecessary. If it's omitted, though,
* clang gets confused and warns on the subsequent use of t_new.
*/
activity_callback_thunk_t t_new = {NULL, NULL};
WRITE(t_new, activity_callback_thunk_t);
tsd_activity_callback_thunk_set(tsd, t_new);
}
ret = 0;
label_return:
return ret;
}
/*
* Output six memory utilization entries for an input pointer, the first one of
* type (void *) and the remaining five of type size_t, describing the following

View file

@ -916,15 +916,20 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
size_t max_next_neighbor = max_size > edata_size_get(edata)
? max_size - edata_size_get(edata)
: 0;
if (next != NULL && edata_size_get(next) <= max_next_neighbor) {
if (!extent_coalesce(
tsdn, pac, ehooks, ecache, edata, next, true)) {
if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
return edata;
if (next != NULL) {
if (edata_size_get(next) > max_next_neighbor) {
emap_release_edata(
tsdn, pac->emap, next, ecache->state);
} else {
if (!extent_coalesce(tsdn, pac, ehooks, ecache,
edata, next, true)) {
if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
return edata;
}
again = true;
}
again = true;
}
}
@ -934,16 +939,21 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
size_t max_prev_neighbor = max_size > edata_size_get(edata)
? max_size - edata_size_get(edata)
: 0;
if (prev != NULL && edata_size_get(prev) <= max_prev_neighbor) {
if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
prev, false)) {
edata = prev;
if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
return edata;
if (prev != NULL) {
if (edata_size_get(prev) > max_prev_neighbor) {
emap_release_edata(
tsdn, pac->emap, prev, ecache->state);
} else {
if (!extent_coalesce(tsdn, pac, ehooks, ecache,
edata, prev, false)) {
edata = prev;
if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
return edata;
}
again = true;
}
again = true;
}
}
} while (again);

View file

@ -153,11 +153,14 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- (uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
edata_init(gap, arena_ind_get(arena),
gap_addr_page, gap_size_page, false,
SC_NSIZES,
gap_addr_page, gap_size_page,
/* slab */ false,
/* szind */ SC_NSIZES,
extent_sn_next(&arena->pa_shard.pac),
extent_state_active, false, true,
EXTENT_PAI_PAC, head_state);
extent_state_active,
/* zeroed */ false,
/* committed */ true,
/* pai */ EXTENT_PAI_PAC, head_state);
}
/*
* Compute the address just past the end of the desired
@ -203,9 +206,16 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
arena);
edata_init(&edata, arena_ind_get(arena),
ret, size, size, false, SC_NSIZES,
extent_state_active, false, true,
EXTENT_PAI_PAC, head_state);
ret, size,
/* slab */ false,
/* szind */ SC_NSIZES,
extent_sn_next(
&arena->pa_shard.pac),
extent_state_active,
/* zeroed */ false,
/* committed */ true,
/* pai */ EXTENT_PAI_PAC,
head_state);
if (extent_purge_forced_wrapper(tsdn,
ehooks, &edata, 0, size)) {
memset(ret, 0, size);

View file

@ -147,7 +147,7 @@ large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min > oldusize
&& large_ralloc_no_move_expand(
&& !large_ralloc_no_move_expand(
tsdn, edata, usize_min, zero)) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;

View file

@ -692,7 +692,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
label_out:
if (i < size) {
str[i] = '\0';
} else {
} else if (size != 0) {
str[size - 1] = '\0';
}

View file

@ -198,7 +198,9 @@ pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
edata = ecache_alloc_grow(tsdn, pac, ehooks,
&pac->ecache_retained, NULL, size, alignment, zero,
guarded);
newly_mapped_size = size;
if (edata != NULL) {
newly_mapped_size = size;
}
}
if (config_stats && newly_mapped_size != 0) {

View file

@ -718,7 +718,7 @@ os_page_detect(void) {
#else
long result = sysconf(_SC_PAGESIZE);
if (result == -1) {
return LG_PAGE;
return PAGE;
}
return (size_t)result;
#endif

View file

@ -3,7 +3,6 @@
#include "jemalloc/internal/peak_event.h"
#include "jemalloc/internal/activity_callback.h"
#include "jemalloc/internal/peak.h"
#include "jemalloc/internal/thread_event_registry.h"
@ -16,17 +15,6 @@ peak_event_update(tsd_t *tsd) {
peak_update(peak, alloc, dalloc);
}
static void
peak_event_activity_callback(tsd_t *tsd) {
activity_callback_thunk_t *thunk = tsd_activity_callback_thunkp_get(
tsd);
uint64_t alloc = tsd_thread_allocated_get(tsd);
uint64_t dalloc = tsd_thread_deallocated_get(tsd);
if (thunk->callback != NULL) {
thunk->callback(thunk->uctx, alloc, dalloc);
}
}
/* Set current state to zero. */
void
peak_event_zero(tsd_t *tsd) {
@ -55,7 +43,6 @@ peak_event_postponed_event_wait(tsd_t *tsd) {
static void
peak_event_handler(tsd_t *tsd) {
peak_event_update(tsd);
peak_event_activity_callback(tsd);
}
static te_enabled_t

View file

@ -73,17 +73,21 @@ prof_mapping_containing_addr(uintptr_t addr, const char *maps_path,
}
remaining = malloc_read_fd(fd, buf, sizeof(buf));
if (remaining <= 0) {
if (remaining < 0) {
ret = errno;
break;
} else if (remaining == 0) {
break;
}
line = buf;
} else if (line == NULL) {
/* case 1: no newline found in buf */
remaining = malloc_read_fd(fd, buf, sizeof(buf));
if (remaining <= 0) {
if (remaining < 0) {
ret = errno;
break;
} else if (remaining == 0) {
break;
}
line = memchr(buf, '\n', remaining);
if (line != NULL) {
@ -99,11 +103,13 @@ prof_mapping_containing_addr(uintptr_t addr, const char *maps_path,
remaining); /* copy remaining characters to start of buf */
line = buf;
size_t count = malloc_read_fd(
ssize_t count = malloc_read_fd(
fd, buf + remaining, sizeof(buf) - remaining);
if (count <= 0) {
if (count < 0) {
ret = errno;
break;
} else if (count == 0) {
break;
}
remaining +=

View file

@ -31,6 +31,7 @@ san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
bool err = san_bump_grow_locked(
tsdn, sba, pac, ehooks, guarded_size);
if (err) {
sba->curr_reg = to_destroy;
goto label_err;
}
} else {

View file

@ -130,13 +130,17 @@ sec_multishard_trylock_alloc(
cur_shard = 0;
}
}
/* No bin had alloc or had the extent */
/*
* TODO: Benchmark whether it is worth blocking on all shards here before
* declaring a miss. That could recover more remote-shard hits under
* contention, but it also changes the allocation latency policy.
*/
assert(cur_shard == sec_shard_pick(tsdn, sec));
bin = sec_bin_pick(sec, cur_shard, pszind);
malloc_mutex_lock(tsdn, &bin->mtx);
edata_t *edata = sec_bin_alloc_locked(tsdn, sec, bin, size);
if (edata == NULL) {
/* Only now we know it is a miss */
/* Only now we know it is a miss. */
bin->stats.nmisses++;
}
malloc_mutex_unlock(tsdn, &bin->mtx);

View file

@ -981,13 +981,15 @@ stats_arena_hpa_shard_slabs_print(emitter_t *emitter, unsigned i) {
emitter_json_kv(
emitter, "nactive_huge", emitter_type_size, &nactive_huge);
emitter_json_kv(
emitter, "nactive_huge", emitter_type_size, &nactive_huge);
emitter, "ndirty_huge", emitter_type_size, &ndirty_huge);
emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
&npageslabs_nonhuge);
emitter_json_kv(
emitter, "nactive_nonhuge", emitter_type_size, &nactive_nonhuge);
emitter_json_kv(
emitter, "ndirty_nonhuge", emitter_type_size, &ndirty_nonhuge);
emitter_json_kv(emitter, "nretained_nonhuge", emitter_type_size,
&nretained_nonhuge);
emitter_json_object_end(emitter); /* End "full_slabs" */
/* Next, empty slab stats. */
@ -1022,13 +1024,15 @@ stats_arena_hpa_shard_slabs_print(emitter_t *emitter, unsigned i) {
emitter_json_kv(
emitter, "nactive_huge", emitter_type_size, &nactive_huge);
emitter_json_kv(
emitter, "nactive_huge", emitter_type_size, &nactive_huge);
emitter, "ndirty_huge", emitter_type_size, &ndirty_huge);
emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
&npageslabs_nonhuge);
emitter_json_kv(
emitter, "nactive_nonhuge", emitter_type_size, &nactive_nonhuge);
emitter_json_kv(
emitter, "ndirty_nonhuge", emitter_type_size, &ndirty_nonhuge);
emitter_json_kv(emitter, "nretained_nonhuge", emitter_type_size,
&nretained_nonhuge);
emitter_json_object_end(emitter); /* End "empty_slabs" */
/* Last, nonfull slab stats. */
@ -1103,6 +1107,8 @@ stats_arena_hpa_shard_slabs_print(emitter_t *emitter, unsigned i) {
&nactive_nonhuge);
emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
&ndirty_nonhuge);
emitter_json_kv(emitter, "nretained_nonhuge", emitter_type_size,
&nretained_nonhuge);
emitter_json_object_end(emitter);
}
emitter_json_array_end(emitter); /* End "nonfull_slabs" */
@ -1113,9 +1119,8 @@ stats_arena_hpa_shard_slabs_print(emitter_t *emitter, unsigned i) {
static void
stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
stats_arena_hpa_shard_sec_print(emitter, i);
emitter_json_object_kv_begin(emitter, "hpa_shard");
stats_arena_hpa_shard_sec_print(emitter, i);
stats_arena_hpa_shard_counters_print(emitter, i, uptime);
stats_arena_hpa_shard_slabs_print(emitter, i);
emitter_json_object_end(emitter); /* End "hpa_shard" */

View file

@ -65,7 +65,7 @@ sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
}
}
for (int i = pind; i <= (int)SC_NPSIZES; i++) {
sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE;
sz_pind2sz_tab[i] = sc_data->large_maxclass + PAGE;
}
}
@ -93,7 +93,7 @@ sz_boot_size2index_tab(const sc_data_t *sc_data) {
size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1;
size_t dst_ind = 0;
for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max;
sc_ind++) {
sc_ind++) {
const sc_t *sc = &sc_data->sc[sc_ind];
size_t sz = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << sc->lg_delta);

View file

@ -25,55 +25,10 @@ TEST_BEGIN(test_conf_handle_bool_invalid) {
}
TEST_END
TEST_BEGIN(test_conf_handle_unsigned_in_range) {
uintmax_t result = 0;
bool err = conf_handle_unsigned("100", sizeof("100") - 1,
1, 2048, true, true, true, &result);
expect_false(err, "Should succeed for in-range value");
expect_u64_eq((uint64_t)result, 100, "result should be 100");
}
TEST_END
TEST_BEGIN(test_conf_handle_unsigned_clip_max) {
uintmax_t result = 0;
bool err = conf_handle_unsigned("9999", sizeof("9999") - 1,
1, 2048, true, true, true, &result);
expect_false(err, "Should succeed with clipping");
expect_u64_eq((uint64_t)result, 2048,
"result should be clipped to max 2048");
}
TEST_END
TEST_BEGIN(test_conf_handle_unsigned_clip_min) {
uintmax_t result = 0;
bool err = conf_handle_unsigned("0", sizeof("0") - 1,
1, 2048, true, true, true, &result);
expect_false(err, "Should succeed with clipping");
expect_u64_eq((uint64_t)result, 1,
"result should be clipped to min 1");
}
TEST_END
TEST_BEGIN(test_conf_handle_unsigned_no_clip_reject) {
uintmax_t result = 0;
bool err = conf_handle_unsigned("9999", sizeof("9999") - 1,
1, 2048, true, true, false, &result);
expect_true(err, "Should fail for out-of-range value without clip");
}
TEST_END
TEST_BEGIN(test_conf_handle_unsigned_invalid) {
uintmax_t result = 0;
bool err = conf_handle_unsigned("abc", sizeof("abc") - 1,
1, 2048, true, true, true, &result);
expect_true(err, "Should fail for non-numeric input");
}
TEST_END
TEST_BEGIN(test_conf_handle_signed_valid) {
intmax_t result = 0;
bool err = conf_handle_signed("5000", sizeof("5000") - 1,
-1, INTMAX_MAX, true, false, false, &result);
bool err = conf_handle_signed("5000", sizeof("5000") - 1, -1,
INTMAX_MAX, true, false, false, &result);
expect_false(err, "Should succeed for valid value");
expect_d64_eq((int64_t)result, 5000, "result should be 5000");
}
@ -81,8 +36,8 @@ TEST_END
TEST_BEGIN(test_conf_handle_signed_negative) {
intmax_t result = 0;
bool err = conf_handle_signed("-1", sizeof("-1") - 1,
-1, INTMAX_MAX, true, false, false, &result);
bool err = conf_handle_signed("-1", sizeof("-1") - 1, -1, INTMAX_MAX,
true, false, false, &result);
expect_false(err, "Should succeed for -1");
expect_d64_eq((int64_t)result, -1, "result should be -1");
}
@ -90,8 +45,8 @@ TEST_END
TEST_BEGIN(test_conf_handle_signed_out_of_range) {
intmax_t result = 0;
bool err = conf_handle_signed("5000", sizeof("5000") - 1,
-1, 4999, true, true, false, &result);
bool err = conf_handle_signed(
"5000", sizeof("5000") - 1, -1, 4999, true, true, false, &result);
expect_true(err, "Should fail for out-of-range value");
}
TEST_END
@ -101,30 +56,34 @@ TEST_BEGIN(test_conf_handle_char_p) {
bool err;
/* Normal copy. */
err = conf_handle_char_p("hello", sizeof("hello") - 1, buf, sizeof(buf));
err = conf_handle_char_p(
"hello", sizeof("hello") - 1, buf, sizeof(buf));
expect_false(err, "Should succeed");
expect_str_eq(buf, "hello", "Should copy string");
/* Truncation. */
err = conf_handle_char_p("longstring", sizeof("longstring") - 1,
buf, sizeof(buf));
err = conf_handle_char_p(
"longstring", sizeof("longstring") - 1, buf, sizeof(buf));
expect_false(err, "Should succeed even when truncating");
expect_str_eq(buf, "longstr", "Should truncate to dest_sz - 1");
}
TEST_END
TEST_BEGIN(test_conf_handle_char_p_zero_dest_sz) {
char buf[4] = {'X', 'Y', 'Z', '\0'};
bool err;
err = conf_handle_char_p("abc", sizeof("abc") - 1, buf, 0);
expect_false(err, "Should succeed for zero-sized destination");
expect_c_eq(buf[0], 'X', "Zero-sized destination must not be modified");
}
TEST_END
int
main(void) {
return test(test_conf_handle_bool_true,
test_conf_handle_bool_false,
test_conf_handle_bool_invalid,
test_conf_handle_unsigned_in_range,
test_conf_handle_unsigned_clip_max,
test_conf_handle_unsigned_clip_min,
test_conf_handle_unsigned_no_clip_reject,
test_conf_handle_unsigned_invalid,
test_conf_handle_signed_valid,
return test(test_conf_handle_bool_true, test_conf_handle_bool_false,
test_conf_handle_bool_invalid, test_conf_handle_signed_valid,
test_conf_handle_signed_negative,
test_conf_handle_signed_out_of_range,
test_conf_handle_char_p);
test_conf_handle_signed_out_of_range, test_conf_handle_char_p,
test_conf_handle_char_p_zero_dest_sz);
}

View file

@ -185,6 +185,109 @@ static const char *arena_mutex_names[] = {"large", "extent_avail",
static const size_t num_arena_mutexes = sizeof(arena_mutex_names)
/ sizeof(arena_mutex_names[0]);
static const char *
json_find_object_end(const char *object_begin) {
int depth = 0;
for (const char *cur = object_begin; *cur != '\0'; cur++) {
if (*cur == '{') {
depth++;
} else if (*cur == '}') {
depth--;
if (depth == 0) {
return cur;
}
if (depth < 0) {
return NULL;
}
}
}
return NULL;
}
static const char *
json_find_array_end(const char *array_begin) {
int depth = 0;
for (const char *cur = array_begin; *cur != '\0'; cur++) {
if (*cur == '[') {
depth++;
} else if (*cur == ']') {
depth--;
if (depth == 0) {
return cur;
}
if (depth < 0) {
return NULL;
}
}
}
return NULL;
}
static const char *
json_find_previous_hpa_shard_object(
const char *json, const char *pos, const char **object_end) {
*object_end = NULL;
const char *found = NULL;
const char *cur = json;
const char *next;
while ((next = strstr(cur, "\"hpa_shard\":{")) != NULL && next < pos) {
found = strchr(next, '{');
cur = next + 1;
}
if (found == NULL) {
return NULL;
}
*object_end = json_find_object_end(found);
return found;
}
static const char *
json_find_named_object(
const char *json, const char *key, const char **object_end) {
*object_end = NULL;
char search_key[128];
size_t written = malloc_snprintf(
search_key, sizeof(search_key), "\"%s\":{", key);
if (written >= sizeof(search_key)) {
return NULL;
}
const char *object_begin = strstr(json, search_key);
if (object_begin == NULL) {
return NULL;
}
object_begin = strchr(object_begin, '{');
if (object_begin == NULL) {
return NULL;
}
*object_end = json_find_object_end(object_begin);
return object_begin;
}
static const char *
json_find_named_array(
const char *json, const char *key, const char **array_end) {
*array_end = NULL;
char search_key[128];
size_t written = malloc_snprintf(
search_key, sizeof(search_key), "\"%s\":[", key);
if (written >= sizeof(search_key)) {
return NULL;
}
const char *array_begin = strstr(json, search_key);
if (array_begin == NULL) {
return NULL;
}
array_begin = strchr(array_begin, '[');
if (array_begin == NULL) {
return NULL;
}
*array_end = json_find_array_end(array_begin);
return array_begin;
}
TEST_BEGIN(test_json_stats_mutexes) {
test_skip_if(!config_stats);
@ -237,7 +340,170 @@ TEST_BEGIN(test_json_stats_mutexes) {
}
TEST_END
/*
* Verify that hpa_shard JSON stats contain "ndirty_huge" key in both
* full_slabs and empty_slabs sections. A previous bug emitted duplicate
* "nactive_huge" instead of "ndirty_huge".
*/
TEST_BEGIN(test_hpa_shard_json_ndirty_huge) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	/* Do some allocation to create HPA state. */
	void *p = mallocx(PAGE, MALLOCX_TCACHE_NONE);
	expect_ptr_not_null(p, "Unexpected mallocx failure");
	/* Refresh cached stats so the printed output is current. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sz), 0,
	    "Unexpected mallctl() failure");
	stats_buf_t sbuf;
	stats_buf_init(&sbuf);
	/* "J" for JSON, include per-arena HPA stats. */
	malloc_stats_print(stats_buf_write_cb, &sbuf, "J");
	/*
	 * Find "full_slabs" and check that "ndirty_huge" appears before
	 * the following "empty_slabs" section (or end of buffer) begins.
	 */
	const char *full_slabs = strstr(sbuf.buf, "\"full_slabs\"");
	if (full_slabs != NULL) {
		const char *empty_slabs = strstr(full_slabs, "\"empty_slabs\"");
		const char *search_end = empty_slabs != NULL
		    ? empty_slabs
		    : sbuf.buf + sbuf.len;
		const char *ndirty = strstr(full_slabs, "\"ndirty_huge\"");
		bool found = (ndirty != NULL && ndirty < search_end);
		expect_true(
		    found, "full_slabs section should contain ndirty_huge key");
	}
	/*
	 * Find "empty_slabs" and check it contains "ndirty_huge".
	 */
	const char *empty_slabs = strstr(sbuf.buf, "\"empty_slabs\"");
	if (empty_slabs != NULL) {
		/* The empty_slabs section ends where nonfull_slabs begins. */
		const char *nonfull = strstr(empty_slabs, "\"nonfull_slabs\"");
		const char *search_end = nonfull != NULL ? nonfull
		    : sbuf.buf + sbuf.len;
		const char *ndirty = strstr(empty_slabs, "\"ndirty_huge\"");
		bool found = (ndirty != NULL && ndirty < search_end);
		expect_true(found,
		    "empty_slabs section should contain ndirty_huge key");
	}
	stats_buf_fini(&sbuf);
	dallocx(p, MALLOCX_TCACHE_NONE);
}
TEST_END
/*
 * Verify that the SEC stat keys (sec_bytes, sec_hits, sec_misses) are
 * emitted nested inside an hpa_shard JSON object rather than outside it.
 */
TEST_BEGIN(test_hpa_shard_json_contains_sec_stats) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	/* Touch the allocator so HPA state exists before printing stats. */
	void *ptr = mallocx(PAGE, MALLOCX_TCACHE_NONE);
	expect_ptr_not_null(ptr, "Unexpected mallocx failure");
	uint64_t epoch = 1;
	size_t epoch_sz = sizeof(epoch);
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, epoch_sz), 0,
	    "Unexpected mallctl() failure");
	stats_buf_t stats;
	stats_buf_init(&stats);
	malloc_stats_print(stats_buf_write_cb, &stats, "J");
	const char *bytes_key = strstr(stats.buf, "\"sec_bytes\"");
	expect_ptr_not_null(bytes_key, "JSON output should contain sec_bytes");
	/* Walk back from sec_bytes to the hpa_shard object enclosing it. */
	const char *shard_end = NULL;
	const char *shard = json_find_previous_hpa_shard_object(
	    stats.buf, bytes_key, &shard_end);
	expect_ptr_not_null(shard,
	    "sec_bytes should be associated with an hpa_shard JSON object");
	expect_ptr_not_null(shard_end,
	    "Could not find end of enclosing hpa_shard JSON object");
	expect_true(bytes_key != NULL && bytes_key < shard_end,
	    "sec_bytes should be nested inside hpa_shard JSON object");
	const char *hits_key = strstr(shard, "\"sec_hits\"");
	expect_true(hits_key != NULL && hits_key < shard_end,
	    "sec_hits should be nested inside hpa_shard JSON object");
	const char *misses_key = strstr(shard, "\"sec_misses\"");
	expect_true(misses_key != NULL && misses_key < shard_end,
	    "sec_misses should be nested inside hpa_shard JSON object");
	stats_buf_fini(&stats);
	dallocx(ptr, MALLOCX_TCACHE_NONE);
}
TEST_END
/*
 * Verify that the retained-slab stat key "nretained_nonhuge" is emitted
 * in each hpa_shard slab section of the JSON stats output: the
 * full_slabs object, the empty_slabs object, and the nonfull_slabs
 * array.
 */
TEST_BEGIN(test_hpa_shard_json_contains_retained_stats) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	/* Allocate so HPA shard state exists before printing stats. */
	void *p = mallocx(PAGE, MALLOCX_TCACHE_NONE);
	expect_ptr_not_null(p, "Unexpected mallocx failure");
	/* Refresh cached stats so the printed output is current. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sz), 0,
	    "Unexpected mallctl() failure");
	stats_buf_t sbuf;
	stats_buf_init(&sbuf);
	/* "J": JSON-formatted stats output. */
	malloc_stats_print(stats_buf_write_cb, &sbuf, "J");
	/* full_slabs object must contain the retained key. */
	const char *full_slabs_end = NULL;
	const char *full_slabs = json_find_named_object(
	    sbuf.buf, "full_slabs", &full_slabs_end);
	expect_ptr_not_null(
	    full_slabs, "JSON output should contain full_slabs");
	const char *full_retained = strstr(full_slabs, "\"nretained_nonhuge\"");
	expect_true(full_retained != NULL && full_retained < full_slabs_end,
	    "full_slabs should contain nretained_nonhuge");
	/* Likewise for the empty_slabs object. */
	const char *empty_slabs_end = NULL;
	const char *empty_slabs = json_find_named_object(
	    sbuf.buf, "empty_slabs", &empty_slabs_end);
	expect_ptr_not_null(
	    empty_slabs, "JSON output should contain empty_slabs");
	const char *empty_retained = strstr(
	    empty_slabs, "\"nretained_nonhuge\"");
	expect_true(empty_retained != NULL && empty_retained < empty_slabs_end,
	    "empty_slabs should contain nretained_nonhuge");
	/* And for the nonfull_slabs array. */
	const char *nonfull_slabs_end = NULL;
	const char *nonfull_slabs = json_find_named_array(
	    sbuf.buf, "nonfull_slabs", &nonfull_slabs_end);
	expect_ptr_not_null(
	    nonfull_slabs, "JSON output should contain nonfull_slabs");
	const char *nonfull_retained = strstr(
	    nonfull_slabs, "\"nretained_nonhuge\"");
	expect_true(
	    nonfull_retained != NULL && nonfull_retained < nonfull_slabs_end,
	    "nonfull_slabs should contain nretained_nonhuge");
	stats_buf_fini(&sbuf);
	dallocx(p, MALLOCX_TCACHE_NONE);
}
TEST_END
int
main(void) {
	/*
	 * Run without reentrancy checking: the HPA tests read raw stats
	 * buffers produced by malloc_stats_print.  The previous stray
	 * "return test(test_json_stats_mutexes);" made this call
	 * unreachable, so the three HPA JSON tests never ran.
	 */
	return test_no_reentrancy(test_json_stats_mutexes,
	    test_hpa_shard_json_ndirty_huge,
	    test_hpa_shard_json_contains_sec_stats,
	    test_hpa_shard_json_contains_retained_stats);
}

76
test/unit/large_ralloc.c Normal file
View file

@ -0,0 +1,76 @@
#include "test/jemalloc_test.h"
/*
* Test that large_ralloc_no_move causes a failure (returns true) when
* in-place extent expansion cannot succeed for either usize_max or
* usize_min.
*
* A previous bug omitted the ! negation on the second extent expansion
* attempt (usize_min fallback), causing false success (return false) when
* the expansion actually failed.
*/
TEST_BEGIN(test_large_ralloc_no_move_expand_fail) {
	/*
	 * Allocate two adjacent large objects in the same arena to block
	 * in-place expansion of the first one.
	 */
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	/* Bypass the tcache so allocations map directly to extents. */
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
	size_t large_sz = SC_LARGE_MINCLASS;
	/* Allocate several blocks to prevent expansion of the first. */
	void *blocks[8];
	for (size_t i = 0; i < ARRAY_SIZE(blocks); i++) {
		blocks[i] = mallocx(large_sz, flags);
		expect_ptr_not_null(blocks[i], "Unexpected mallocx() failure");
	}
	/*
	 * Try to expand blocks[0] in place. Use usize_min < usize_max to
	 * exercise the fallback path.
	 */
	tsd_t *tsd = tsd_fetch();
	edata_t *edata = emap_edata_lookup(
	    tsd_tsdn(tsd), &arena_emap_global, blocks[0]);
	expect_ptr_not_null(edata, "Unexpected edata lookup failure");
	size_t oldusize = edata_usize_get(edata);
	size_t usize_min = sz_s2u(oldusize + 1);
	size_t usize_max = sz_s2u(oldusize * 2);
	/* Ensure min and max are in different size classes. */
	if (usize_min == usize_max) {
		usize_max = sz_s2u(usize_min + 1);
	}
	/* large_ralloc_no_move returns true on failure. */
	bool ret = large_ralloc_no_move(
	    tsd_tsdn(tsd), edata, usize_min, usize_max, false);
	/*
	 * With adjacent allocations blocking expansion, this should fail.
	 * The bug caused ret == false (success) even when expansion failed.
	 */
	if (!ret) {
		/*
		 * Expansion might actually succeed if adjacent memory
		 * is free. Verify the size actually changed.
		 */
		size_t newusize = edata_usize_get(edata);
		expect_zu_ge(newusize, usize_min,
		    "Expansion reported success but size didn't change");
	}
	for (size_t i = 0; i < ARRAY_SIZE(blocks); i++) {
		dallocx(blocks[i], flags);
	}
}
TEST_END
int
main(void) {
	/* Run without reentrancy checking: the test calls internal
	 * allocator APIs (emap lookup, large_ralloc_no_move) directly. */
	return test_no_reentrancy(test_large_ralloc_no_move_expand_fail);
}

View file

@ -956,6 +956,105 @@ TEST_BEGIN(test_arenas_bin_constants) {
}
TEST_END
/*
 * arenas.bin.<i>.* accepts indices in [0, SC_NBINS): querying index
 * SC_NBINS must fail with ENOENT, while SC_NBINS - 1 must succeed.
 */
TEST_BEGIN(test_arenas_bin_oob) {
	char name[128];
	size_t val;
	size_t val_sz = sizeof(val);
	/* Exactly one past the last valid bin index. */
	malloc_snprintf(
	    name, sizeof(name), "arenas.bin.%u.size", (unsigned)SC_NBINS);
	expect_d_eq(mallctl(name, (void *)&val, &val_sz, NULL, 0), ENOENT,
	    "mallctl() should fail for out-of-bounds bin index SC_NBINS");
	/* The largest valid bin index. */
	malloc_snprintf(name, sizeof(name), "arenas.bin.%u.size",
	    (unsigned)(SC_NBINS - 1));
	expect_d_eq(mallctl(name, (void *)&val, &val_sz, NULL, 0), 0,
	    "mallctl() should succeed for valid bin index SC_NBINS-1");
}
TEST_END
/*
 * arenas.lextent.<i>.* accepts indices in [0, SC_NSIZES - SC_NBINS):
 * index nlextents must fail with ENOENT while nlextents - 1 succeeds.
 */
TEST_BEGIN(test_arenas_lextent_oob) {
	char name[128];
	size_t val;
	size_t val_sz = sizeof(val);
	unsigned nlextents = SC_NSIZES - SC_NBINS;
	/* Exactly one past the last valid lextent index. */
	malloc_snprintf(name, sizeof(name), "arenas.lextent.%u.size", nlextents);
	expect_d_eq(mallctl(name, (void *)&val, &val_sz, NULL, 0), ENOENT,
	    "mallctl() should fail for out-of-bounds lextent index");
	/* The last valid element. */
	malloc_snprintf(
	    name, sizeof(name), "arenas.lextent.%u.size", nlextents - 1);
	expect_d_eq(mallctl(name, (void *)&val, &val_sz, NULL, 0), 0,
	    "mallctl() should succeed for valid lextent index");
}
TEST_END
/*
 * stats.arenas.0.bins.<i>.* accepts bin indices in [0, SC_NBINS):
 * SC_NBINS must be rejected with ENOENT and SC_NBINS - 1 accepted.
 */
TEST_BEGIN(test_stats_arenas_bins_oob) {
	test_skip_if(!config_stats);
	char name[128];
	uint64_t val;
	/* Refresh cached stats before querying them. */
	uint64_t epoch = 1;
	size_t len = sizeof(epoch);
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, len), 0,
	    "Unexpected mallctl() failure");
	/* SC_NBINS is one past the valid range. */
	len = sizeof(val);
	malloc_snprintf(name, sizeof(name), "stats.arenas.0.bins.%u.nmalloc",
	    (unsigned)SC_NBINS);
	expect_d_eq(mallctl(name, (void *)&val, &len, NULL, 0), ENOENT,
	    "mallctl() should fail for out-of-bounds stats bin index");
	/* SC_NBINS - 1 is valid. */
	malloc_snprintf(name, sizeof(name), "stats.arenas.0.bins.%u.nmalloc",
	    (unsigned)(SC_NBINS - 1));
	expect_d_eq(mallctl(name, (void *)&val, &len, NULL, 0), 0,
	    "mallctl() should succeed for valid stats bin index");
}
TEST_END
/*
 * stats.arenas.0.lextents.<i>.* accepts indices in [0, nlextents):
 * nlextents must be rejected with ENOENT and nlextents - 1 accepted.
 */
TEST_BEGIN(test_stats_arenas_lextents_oob) {
	test_skip_if(!config_stats);
	char name[128];
	uint64_t val;
	unsigned nlextents = SC_NSIZES - SC_NBINS;
	/* Refresh cached stats before querying them. */
	uint64_t epoch = 1;
	size_t len = sizeof(epoch);
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, len), 0,
	    "Unexpected mallctl() failure");
	/* nlextents is one past the valid range. */
	len = sizeof(val);
	malloc_snprintf(
	    name, sizeof(name), "stats.arenas.0.lextents.%u.nmalloc", nlextents);
	expect_d_eq(mallctl(name, (void *)&val, &len, NULL, 0), ENOENT,
	    "mallctl() should fail for out-of-bounds stats lextent index");
	/* nlextents - 1 is valid. */
	malloc_snprintf(name, sizeof(name), "stats.arenas.0.lextents.%u.nmalloc",
	    nlextents - 1);
	expect_d_eq(mallctl(name, (void *)&val, &len, NULL, 0), 0,
	    "mallctl() should succeed for valid stats lextent index");
}
TEST_END
TEST_BEGIN(test_arenas_lextent_constants) {
#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) \
do { \
@ -1332,77 +1431,6 @@ TEST_BEGIN(test_thread_peak) {
}
TEST_END
/* Records the counters most recently passed to activity_test_callback. */
typedef struct activity_test_data_s activity_test_data_t;
struct activity_test_data_s {
	uint64_t obtained_alloc;	/* Last alloc value observed. */
	uint64_t obtained_dalloc;	/* Last dalloc value observed. */
};
/* Activity callback: stash the reported counters into the uctx struct. */
static void
activity_test_callback(void *uctx, uint64_t alloc, uint64_t dalloc) {
	activity_test_data_t *data = (activity_test_data_t *)uctx;
	data->obtained_alloc = alloc;
	data->obtained_dalloc = dalloc;
}
/*
 * Exercise experimental.thread.activity_callback: installing a callback
 * makes allocation/deallocation events report counters matching
 * thread.allocatedp/thread.deallocatedp; installing NULL disables it.
 */
TEST_BEGIN(test_thread_activity_callback) {
	test_skip_if(!config_stats);
	const size_t big_size = 10 * 1024 * 1024;
	void *ptr;
	int err;
	size_t sz;
	uint64_t *allocatedp;
	uint64_t *deallocatedp;
	/* Grab pointers to this thread's live counters. */
	sz = sizeof(allocatedp);
	err = mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
	assert_d_eq(0, err, "");
	err = mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0);
	assert_d_eq(0, err, "");
	/* Sentinel values: must be overwritten by the read-back below. */
	activity_callback_thunk_t old_thunk = {
	    (activity_callback_t)111, (void *)222};
	activity_test_data_t test_data = {333, 444};
	activity_callback_thunk_t new_thunk = {
	    &activity_test_callback, &test_data};
	/* Install our callback; read back the previous (empty) thunk. */
	sz = sizeof(old_thunk);
	err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
	    &new_thunk, sizeof(new_thunk));
	assert_d_eq(0, err, "");
	expect_true(old_thunk.callback == NULL, "Callback already installed");
	expect_true(old_thunk.uctx == NULL, "Callback data already installed");
	/* Callback values must track the thread counters on alloc... */
	ptr = mallocx(big_size, 0);
	expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
	expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
	/* ...and on dalloc. */
	free(ptr);
	expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
	expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
	/* Uninstall; the returned thunk must be the one we installed. */
	sz = sizeof(old_thunk);
	new_thunk = (activity_callback_thunk_t){NULL, NULL};
	err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
	    &new_thunk, sizeof(new_thunk));
	assert_d_eq(0, err, "");
	expect_true(old_thunk.callback == &activity_test_callback, "");
	expect_true(old_thunk.uctx == &test_data, "");
	/* Inserting NULL should have turned off tracking. */
	test_data.obtained_alloc = 333;
	test_data.obtained_dalloc = 444;
	ptr = mallocx(big_size, 0);
	free(ptr);
	expect_u64_eq(333, test_data.obtained_alloc, "");
	expect_u64_eq(444, test_data.obtained_dalloc, "");
}
TEST_END
static unsigned nuser_thread_event_cb_calls;
static void
user_thread_event_cb(bool is_alloc, uint64_t tallocated, uint64_t tdallocated) {
@ -1450,10 +1478,12 @@ main(void) {
test_arena_i_dss, test_arena_i_name, test_arena_i_retain_grow_limit,
test_arenas_dirty_decay_ms, test_arenas_muzzy_decay_ms,
test_arenas_constants, test_arenas_bin_constants,
test_arenas_bin_oob, test_arenas_lextent_oob,
test_stats_arenas_bins_oob, test_stats_arenas_lextents_oob,
test_arenas_lextent_constants, test_arenas_create,
test_arenas_lookup, test_prof_active, test_stats_arenas,
test_stats_arenas_hpa_shard_counters,
test_stats_arenas_hpa_shard_slabs, test_hooks,
test_hooks_exhaustion, test_thread_idle, test_thread_peak,
test_thread_activity_callback, test_thread_event_hook);
test_thread_event_hook);
}

View file

@ -252,8 +252,26 @@ TEST_BEGIN(test_malloc_snprintf) {
}
TEST_END
TEST_BEGIN(test_malloc_snprintf_zero_size) {
	/*
	 * With size == 0, malloc_snprintf must write nothing and report
	 * the length that would have been written.  A previous bug
	 * caused an out-of-bounds write via str[size - 1] when size was 0.
	 */
	char buf[8];
	memset(buf, 'X', sizeof(buf));
	size_t len = malloc_snprintf(buf, 0, "%s", "hello");
	expect_zu_eq(len, 5, "Expected length 5 for \"hello\"");
	/* The sentinel bytes must be intact. */
	expect_c_eq(buf[0], 'X', "Buffer should not have been modified");
}
TEST_END
int
main(void) {
	/*
	 * The diff residue left both the old and new argument lists in
	 * place (two closing ')' and a duplicated line), which is not
	 * valid C; keep the single merged list including the new
	 * zero-size regression test.
	 */
	return test(test_malloc_strtoumax_no_endptr, test_malloc_strtoumax,
	    test_malloc_snprintf_truncated, test_malloc_snprintf,
	    test_malloc_snprintf_zero_size);
}

View file

@ -121,7 +121,52 @@ TEST_BEGIN(test_alloc_free_purge_thds) {
}
TEST_END
/*
 * Regression test: freeing an extent whose dirty neighbor is acquired
 * for coalescing but then rejected (opt_lg_extent_max_active_fit == 0
 * makes the oversized neighbor ineligible) must release the neighbor so
 * it remains discoverable and dirty.
 */
TEST_BEGIN(test_failed_coalesce_releases_neighbor) {
	test_skip_if(!maps_coalesce);
	test_data_t *test_data = init_test_data(-1, -1);
	/* Tighten the active-fit limit so the coalesce attempt fails. */
	size_t old_lg_extent_max_active_fit = opt_lg_extent_max_active_fit;
	opt_lg_extent_max_active_fit = 0;
	bool deferred_work_generated = false;
	size_t unit = SC_LARGE_MINCLASS;
	size_t alloc_size = 4 * unit;
	edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, alloc_size,
	    PAGE,
	    /* slab */ false, sz_size2index(alloc_size), /* zero */ false,
	    /* guarded */ false, &deferred_work_generated);
	expect_ptr_not_null(edata, "Unexpected pa_alloc() failure");
	/* Shrinking to one unit leaves a dirty 3-unit tail at tail_addr. */
	void *tail_addr = (void *)((uintptr_t)edata_base_get(edata) + unit);
	expect_false(pa_shrink(TSDN_NULL, &test_data->shard, edata, alloc_size,
	    unit, sz_size2index(unit), &deferred_work_generated),
	    "Unexpected pa_shrink() failure");
	edata_t *tail = emap_edata_lookup(
	    TSDN_NULL, &test_data->emap, tail_addr);
	expect_ptr_not_null(tail, "Expected dirty tail extent after shrink");
	expect_ptr_eq(
	    edata_base_get(tail), tail_addr, "Unexpected tail extent address");
	expect_zu_eq(
	    edata_size_get(tail), 3 * unit, "Unexpected tail extent size");
	expect_d_eq(edata_state_get(tail), extent_state_dirty,
	    "Expected tail extent to start dirty");
	/* Free the head; coalescing with the oversized tail should fail. */
	pa_dalloc(
	    TSDN_NULL, &test_data->shard, edata, &deferred_work_generated);
	tail = emap_edata_lookup(TSDN_NULL, &test_data->emap, tail_addr);
	expect_ptr_not_null(
	    tail, "Expected oversized dirty neighbor to remain discoverable");
	expect_d_eq(edata_state_get(tail), extent_state_dirty,
	    "Failed coalesce must release oversized dirty neighbor");
	/* Restore the global option for subsequent tests. */
	opt_lg_extent_max_active_fit = old_lg_extent_max_active_fit;
}
TEST_END
int
main(void) {
	/*
	 * The diff residue left both the old single-test return and the
	 * new two-test return in place; keep the merged list so the
	 * coalesce regression test actually runs.
	 */
	return test(
	    test_alloc_free_purge_thds, test_failed_coalesce_releases_neighbor);
}

View file

@ -4,6 +4,50 @@
#include "jemalloc/internal/arena_structs.h"
#include "jemalloc/internal/san_bump.h"
/* The arena's original extent hooks; the failing hook delegates here. */
static extent_hooks_t *san_bump_default_hooks;
/* Copy of the default hooks with alloc replaced by the failing hook. */
static extent_hooks_t san_bump_hooks;
/* When true, retained-size fresh mappings fail through the hook. */
static bool fail_retained_alloc;
/* Count of allocation attempts the hook intentionally failed. */
static unsigned retained_alloc_fail_calls;
/*
 * Extent alloc hook that simulates OOM for retained-region growth:
 * while fail_retained_alloc is set, any fresh mapping (new_addr == NULL)
 * of at least SBA_RETAINED_ALLOC_SIZE fails and is counted; everything
 * else is forwarded to the saved default hooks.
 */
static void *
san_bump_fail_alloc_hook(extent_hooks_t *UNUSED extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind) {
	bool is_retained_grow = (new_addr == NULL
	    && size >= SBA_RETAINED_ALLOC_SIZE);
	if (fail_retained_alloc && is_retained_grow) {
		retained_alloc_fail_calls++;
		return NULL;
	}
	return san_bump_default_hooks->alloc(san_bump_default_hooks, new_addr,
	    size, alignment, zero, commit, arena_ind);
}
static void
install_san_bump_fail_alloc_hooks(unsigned arena_ind) {
size_t hooks_mib[3];
size_t hooks_miblen = sizeof(hooks_mib) / sizeof(size_t);
size_t old_size = sizeof(extent_hooks_t *);
size_t new_size = sizeof(extent_hooks_t *);
extent_hooks_t *new_hooks;
extent_hooks_t *old_hooks;
expect_d_eq(
mallctlnametomib("arena.0.extent_hooks", hooks_mib, &hooks_miblen),
0, "Unexpected mallctlnametomib() failure");
hooks_mib[1] = (size_t)arena_ind;
expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
&old_size, NULL, 0),
0, "Unexpected extent_hooks error");
san_bump_default_hooks = old_hooks;
san_bump_hooks = *old_hooks;
san_bump_hooks.alloc = san_bump_fail_alloc_hook;
new_hooks = &san_bump_hooks;
expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
(void *)&new_hooks, new_size),
0, "Unexpected extent_hooks install failure");
}
TEST_BEGIN(test_san_bump_alloc) {
test_skip_if(!maps_coalesce || !opt_retain);
@ -69,6 +113,48 @@ TEST_BEGIN(test_san_bump_alloc) {
}
TEST_END
/*
 * Regression test: when growing the retained region fails,
 * san_bump_alloc must preserve the preexisting curr_reg instead of
 * leaking it (see the san_bump_grow_locked failure-path fix).
 */
TEST_BEGIN(test_failed_grow_preserves_curr_reg) {
	test_skip_if(!maps_coalesce || !opt_retain);
	tsdn_t *tsdn = tsdn_fetch();
	san_bump_alloc_t sba;
	san_bump_alloc_init(&sba);
	unsigned arena_ind = do_arena_create(0, 0);
	assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena");
	/* Route retained-size extent allocations through the failing hook. */
	install_san_bump_fail_alloc_hooks(arena_ind);
	arena_t *arena = arena_get(tsdn, arena_ind, false);
	pac_t *pac = &arena->pa_shard.pac;
	/* Small request: leaves a remainder region in sba.curr_reg. */
	size_t small_alloc_size = PAGE * 16;
	edata_t *edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
	    small_alloc_size, /* zero */ false);
	expect_ptr_not_null(edata, "Initial san_bump allocation failed");
	expect_ptr_not_null(sba.curr_reg,
	    "Expected retained region remainder after initial allocation");
	/* Arm the hook so the next retained grow attempt fails. */
	fail_retained_alloc = true;
	retained_alloc_fail_calls = 0;
	edata_t *failed = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
	    SBA_RETAINED_ALLOC_SIZE, /* zero */ false);
	expect_ptr_null(failed, "Expected retained grow allocation failure");
	expect_u_eq(retained_alloc_fail_calls, 1,
	    "Expected exactly one failed retained allocation attempt");
	/* The old curr_reg must still satisfy small requests. */
	edata_t *reused = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
	    small_alloc_size, /* zero */ false);
	expect_ptr_not_null(
	    reused, "Expected allocator to reuse preexisting current region");
	expect_u_eq(retained_alloc_fail_calls, 1,
	    "Reuse path should not attempt another retained grow allocation");
	/* Disarm the hook for subsequent tests. */
	fail_retained_alloc = false;
}
TEST_END
TEST_BEGIN(test_large_alloc_size) {
test_skip_if(!maps_coalesce || !opt_retain);
@ -105,5 +191,6 @@ TEST_END
int
main(void) {
	/*
	 * The diff residue left both the old and new return statements;
	 * keep the merged list so the failed-grow regression test runs.
	 */
	return test(test_san_bump_alloc, test_failed_grow_preserves_curr_reg,
	    test_large_alloc_size);
}

View file

@ -195,9 +195,9 @@ TEST_BEGIN(test_tcache_max) {
global_test = true;
for (alloc_option = alloc_option_start; alloc_option < alloc_option_end;
alloc_option++) {
alloc_option++) {
for (dalloc_option = dalloc_option_start;
dalloc_option < dalloc_option_end; dalloc_option++) {
dalloc_option < dalloc_option_end; dalloc_option++) {
/* opt.tcache_max set to 1024 in tcache_max.sh. */
test_tcache_max_impl(1024, alloc_option, dalloc_option);
}
@ -206,6 +206,50 @@ TEST_BEGIN(test_tcache_max) {
}
TEST_END
/*
 * Regression test: a large allocation that misses the tcache must still
 * count toward the cache bin's nrequests (a previous bug under-counted
 * large nrequests on cache misses).
 */
TEST_BEGIN(test_large_tcache_nrequests_on_miss) {
	test_skip_if(!config_stats);
	test_skip_if(!opt_tcache);
	test_skip_if(opt_prof);
	test_skip_if(san_uaf_detection_enabled());
	/* Smallest large size class. */
	size_t large;
	size_t sz = sizeof(large);
	expect_d_eq(
	    mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");
	/* Raise tcache_max so this large size class is cacheable. */
	expect_d_eq(mallctl("thread.tcache.max", NULL, NULL, (void *)&large,
	    sizeof(large)),
	    0, "Unexpected mallctl() failure");
	/* Empty the tcache so the first allocation below is a miss. */
	expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), 0,
	    "Unexpected tcache flush failure");
	tsd_t *tsd = tsd_fetch();
	expect_ptr_not_null(tsd, "Unexpected tsd_fetch() failure");
	tcache_t *tcache = tcache_get(tsd);
	expect_ptr_not_null(tcache, "Expected auto tcache");
	szind_t binind = sz_size2index(large);
	expect_true(binind >= SC_NBINS, "Expected large size class");
	cache_bin_t *bin = &tcache->bins[binind];
	/* Start counting from a clean slate. */
	bin->tstats.nrequests = 0;
	/* First allocation: tcache miss (bin was just flushed). */
	void *p = mallocx(large, 0);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	expect_u64_eq(bin->tstats.nrequests, 1,
	    "Large tcache miss should count as one request");
	dallocx(p, 0);
	/* Second allocation: served from the tcache. */
	p = mallocx(large, 0);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	expect_u64_eq(bin->tstats.nrequests, 2,
	    "Large tcache hit should increment request count again");
	dallocx(p, 0);
	expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), 0,
	    "Unexpected tcache flush failure");
}
TEST_END
static size_t
tcache_max2nbins(size_t tcache_max) {
return sz_size2index(tcache_max) + 1;
@ -318,9 +362,9 @@ tcache_check(void *arg) {
expect_zu_eq(tcache_nbins, tcache_max2nbins(new_tcache_max),
"Unexpected value for tcache_nbins");
for (unsigned alloc_option = alloc_option_start;
alloc_option < alloc_option_end; alloc_option++) {
alloc_option < alloc_option_end; alloc_option++) {
for (unsigned dalloc_option = dalloc_option_start;
dalloc_option < dalloc_option_end; dalloc_option++) {
dalloc_option < dalloc_option_end; dalloc_option++) {
test_tcache_max_impl(
new_tcache_max, alloc_option, dalloc_option);
}
@ -358,5 +402,6 @@ TEST_END
int
main(void) {
	/*
	 * The diff residue left both the old and new return statements;
	 * keep the merged list so the nrequests regression test runs.
	 */
	return test(test_tcache_max, test_large_tcache_nrequests_on_miss,
	    test_thread_tcache_max);
}