Mirror of https://github.com/jemalloc/jemalloc.git
Refactor/fix arenas manipulation.
Abstract arenas access to use arena_get() (or a0get() where appropriate) rather than directly reading e.g. arenas[ind]. Prior to the addition of the arenas.extend mallctl, the worst possible outcome of directly accessing arenas was a stale read, but arenas.extend may allocate and assign a new array to arenas.

Add a tsd-based arenas_cache, which amortizes arenas reads. This introduces some subtle bootstrapping issues, with tsd_boot() now being split into tsd_boot[01]() to support tsd wrapper allocation bootstrapping, as well as an arenas_cache_bypass tsd variable which dynamically terminates allocation of arenas_cache itself.

Promote a0malloc(), a0calloc(), and a0free() to be generally useful for internal allocation, and use them in several places (more may be appropriate).

Abstract arena->nthreads management and fix a missing decrement during thread destruction (recent tsd refactoring left arenas_cleanup() unused).

Change arena_choose() to propagate OOM, and handle OOM in all callers. This is important for providing consistent allocation behavior when the MALLOCX_ARENA() flag is being used. Prior to this fix, an OOM could cause an allocation to be silently served from a different arena than the one specified.
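Illustrative sketch (not part of this commit): the two central ideas above are a per-thread cache of the arena pointer array, refreshed when the global array grows, and failure propagation for explicitly selected arenas. The following self-contained C sketch models them under simplifying assumptions; the names (g_arenas, t_cache, arena_get_slow, mallocx_in_arena) are hypothetical, and plain __thread plus pthread mutexes stand in for jemalloc's tsd and malloc_mutex machinery.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    unsigned ind;
    unsigned nthreads;
} arena_t;

static pthread_mutex_t arenas_mtx = PTHREAD_MUTEX_INITIALIZER;
static arena_t **g_arenas;          /* Global arena array; may be reallocated when extended. */
static unsigned g_narenas;

static __thread arena_t **t_cache;  /* Per-thread snapshot of g_arenas. */
static __thread unsigned t_ncache;

/* Slow path: (re)build the thread-local cache, then answer from it. */
static arena_t *
arena_get_slow(unsigned ind)
{
    pthread_mutex_lock(&arenas_mtx);
    unsigned n = g_narenas;
    if (ind >= n) {
        pthread_mutex_unlock(&arenas_mtx);
        return NULL;                /* Unknown arena index. */
    }
    arena_t **cache = realloc(t_cache, n * sizeof(arena_t *));
    if (cache == NULL) {
        /* OOM while growing the cache: still answer from the global array. */
        arena_t *a = g_arenas[ind];
        pthread_mutex_unlock(&arenas_mtx);
        return a;
    }
    memcpy(cache, g_arenas, n * sizeof(arena_t *));
    pthread_mutex_unlock(&arenas_mtx);
    t_cache = cache;
    t_ncache = n;
    return t_cache[ind];
}

/* Fast path: no lock, just a thread-local array read. */
static arena_t *
arena_get(unsigned ind)
{
    if (t_cache != NULL && ind < t_ncache && t_cache[ind] != NULL)
        return t_cache[ind];
    return arena_get_slow(ind);
}

/* Explicit-arena allocation: report failure instead of switching arenas. */
void *
mallocx_in_arena(size_t size, unsigned arena_ind)
{
    arena_t *arena = arena_get(arena_ind);

    if (arena == NULL)
        return NULL;                /* Propagate OOM / bad index to the caller. */
    (void)arena;                    /* A real allocator would carve from this arena. */
    return malloc(size);            /* Stand-in for the per-arena allocation path. */
}

The real implementation in the diff below additionally guards against recursive allocation of the cache itself via the arenas_cache_bypass tsd flag, and can lazily create missing arenas via arena_init().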
parent bf40641c5c
commit 8bb3198f72
13 changed files with 745 additions and 352 deletions
src/jemalloc.c — 526 changed lines
@@ -4,8 +4,6 @@
/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
@@ -34,10 +32,20 @@ bool in_valgrind;

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas_total;
unsigned narenas_auto;
/* Protects arenas initialization (arenas, narenas_total). */
static malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests. Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
static arena_t **arenas;
static unsigned narenas_total;
static arena_t *a0; /* arenas[0]; read-only after initialization. */
static unsigned narenas_auto; /* Read-only after initialization. */

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
@@ -144,35 +152,288 @@ static bool malloc_init_hard(void);
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{
    arena_t *ret;

    ret = (arena_t *)base_alloc(sizeof(arena_t));
    if (ret != NULL && !arena_new(ret, ind)) {
        arenas[ind] = ret;
        return (ret);
    }
    /* Only reached if there is an OOM error. */

    /*
     * OOM here is quite inconvenient to propagate, since dealing with it
     * would require a check for failure in the fast path. Instead, punt
     * by using arenas[0]. In practice, this is an extremely unlikely
     * failure.
     * TSD initialization can't be safely done as a side effect of
     * deallocation, because it is possible for a thread to do nothing but
     * deallocate its TLS data via free(), in which case writing to TLS
     * would cause write-after-free memory corruption. The quarantine
     * facility *only* gets used as a side effect of deallocation, so make
     * a best effort attempt at initializing its TSD by hooking all
     * allocation events.
     */
    malloc_write("<jemalloc>: Error initializing arena\n");
    if (opt_abort)
        abort();

    return (arenas[0]);
    if (config_fill && unlikely(opt_quarantine))
        quarantine_alloc_hook();
}

/* Slow path, called only by choose_arena(). */
JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

    if (unlikely(!malloc_initialized) && malloc_init_hard())
        return (true);
    malloc_thread_init();

    return (false);
}

/*
 * The a0*() functions are used instead of i[mcd]alloc() in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access. These functions are
 * also exposed for use in static binaries on FreeBSD, hence the old-style
 * malloc() API.
 */

arena_t *
choose_arena_hard(tsd_t *tsd)
a0get(void)
{

    assert(a0 != NULL);
    return (a0);
}

static void *
a0alloc(size_t size, bool zero)
{
    void *ret;

    if (unlikely(malloc_init()))
        return (NULL);

    if (size == 0)
        size = 1;

    if (size <= arena_maxclass)
        ret = arena_malloc(NULL, a0get(), size, zero, false);
    else
        ret = huge_malloc(NULL, a0get(), size, zero);

    return (ret);
}

void *
a0malloc(size_t size)
{

    return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

    return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
    arena_chunk_t *chunk;

    if (ptr == NULL)
        return;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(NULL, chunk, ptr, false);
    else
        huge_dalloc(ptr);
}

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arena_init(unsigned ind)
{
    arena_t *arena;

    malloc_mutex_lock(&arenas_lock);

    /* Expand arenas if necessary. */
    assert(ind <= narenas_total);
    if (ind == narenas_total) {
        unsigned narenas_new = narenas_total + 1;
        arena_t **arenas_new =
            (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
            sizeof(arena_t *)));
        if (arenas_new == NULL) {
            arena = NULL;
            goto label_return;
        }
        memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
        arenas_new[ind] = NULL;
        /*
         * Deallocate only if arenas came from a0malloc() (not
         * base_alloc()).
         */
        if (narenas_total != narenas_auto)
            a0free(arenas);
        arenas = arenas_new;
        narenas_total = narenas_new;
    }

    /*
     * Another thread may have already initialized arenas[ind] if it's an
     * auto arena.
     */
    arena = arenas[ind];
    if (arena != NULL) {
        assert(ind < narenas_auto);
        goto label_return;
    }

    /* Actually initialize the arena. */
    arena = arenas[ind] = arena_new(ind);
label_return:
    malloc_mutex_unlock(&arenas_lock);
    return (arena);
}

unsigned
narenas_total_get(void)
{
    unsigned narenas;

    malloc_mutex_lock(&arenas_lock);
    narenas = narenas_total;
    malloc_mutex_unlock(&arenas_lock);

    return (narenas);
}

static void
arena_bind_locked(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    arena = arenas[ind];
    arena->nthreads++;

    if (tsd_nominal(tsd))
        tsd_arena_set(tsd, arena);
}

static void
arena_bind(tsd_t *tsd, unsigned ind)
{

    malloc_mutex_lock(&arenas_lock);
    arena_bind_locked(tsd, ind);
    malloc_mutex_unlock(&arenas_lock);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
    arena_t *oldarena, *newarena;

    malloc_mutex_lock(&arenas_lock);
    oldarena = arenas[oldind];
    newarena = arenas[newind];
    oldarena->nthreads--;
    newarena->nthreads++;
    malloc_mutex_unlock(&arenas_lock);
    tsd_arena_set(tsd, newarena);
}

unsigned
arena_nbound(unsigned ind)
{
    unsigned nthreads;

    malloc_mutex_lock(&arenas_lock);
    nthreads = arenas[ind]->nthreads;
    malloc_mutex_unlock(&arenas_lock);
    return (nthreads);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    malloc_mutex_lock(&arenas_lock);
    arena = arenas[ind];
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
    tsd_arena_set(tsd, NULL);
}

arena_t *
arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
{
    arena_t *arena;
    arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
    unsigned narenas_cache = tsd_narenas_cache_get(tsd);
    unsigned narenas_actual = narenas_total_get();

    /* Deallocate old cache if it's too small. */
    if (arenas_cache != NULL && narenas_cache < narenas_actual) {
        a0free(arenas_cache);
        arenas_cache = NULL;
        narenas_cache = 0;
        tsd_arenas_cache_set(tsd, arenas_cache);
        tsd_narenas_cache_set(tsd, narenas_cache);
    }

    /* Allocate cache if it's missing. */
    if (arenas_cache == NULL) {
        bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
        assert(ind < narenas_actual || !init_if_missing);
        narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;

        if (!*arenas_cache_bypassp) {
            *arenas_cache_bypassp = true;
            arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
                narenas_cache);
            *arenas_cache_bypassp = false;
        } else
            arenas_cache = NULL;
        if (arenas_cache == NULL) {
            /*
             * This function must always tell the truth, even if
             * it's slow, so don't let OOM or recursive allocation
             * avoidance (note arenas_cache_bypass check) get in the
             * way.
             */
            if (ind >= narenas_actual)
                return (NULL);
            malloc_mutex_lock(&arenas_lock);
            arena = arenas[ind];
            malloc_mutex_unlock(&arenas_lock);
            return (arena);
        }
        tsd_arenas_cache_set(tsd, arenas_cache);
        tsd_narenas_cache_set(tsd, narenas_cache);
    }

    /*
     * Copy to cache. It's possible that the actual number of arenas has
     * increased since narenas_total_get() was called above, but that causes
     * no correctness issues unless two threads concurrently execute the
     * arenas.extend mallctl, which we trust mallctl synchronization to
     * prevent.
     */
    malloc_mutex_lock(&arenas_lock);
    memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
    malloc_mutex_unlock(&arenas_lock);
    if (narenas_cache > narenas_actual) {
        memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
            (narenas_cache - narenas_actual));
    }

    /* Read the refreshed cache, and init the arena if necessary. */
    arena = arenas_cache[ind];
    if (init_if_missing && arena == NULL)
        arena = arenas_cache[ind] = arena_init(ind);
    return (arena);
}

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd)
{
    arena_t *ret;

@@ -182,7 +443,7 @@ choose_arena_hard(tsd_t *tsd)
        choose = 0;
        first_null = narenas_auto;
        malloc_mutex_lock(&arenas_lock);
        assert(arenas[0] != NULL);
        assert(a0get() != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arenas[i] != NULL) {
                /*
@@ -215,20 +476,20 @@ choose_arena_hard(tsd_t *tsd)
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            ret = arenas_extend(first_null);
            choose = first_null;
            ret = arena_init(choose);
            if (ret == NULL) {
                malloc_mutex_unlock(&arenas_lock);
                return (NULL);
            }
        }
        ret->nthreads++;
        arena_bind_locked(tsd, choose);
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arenas[0];
        malloc_mutex_lock(&arenas_lock);
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
        ret = a0get();
        arena_bind(tsd, 0);
    }

    if (tsd_nominal(tsd))
        tsd_arena_set(tsd, ret);

    return (ret);
}
@@ -248,6 +509,33 @@ thread_deallocated_cleanup(tsd_t *tsd)

void
arena_cleanup(tsd_t *tsd)
{
    arena_t *arena;

    arena = tsd_arena_get(tsd);
    if (arena != NULL)
        arena_unbind(tsd, arena->ind);
}

void
arenas_cache_cleanup(tsd_t *tsd)
{
    arena_t **arenas_cache;

    arenas_cache = tsd_arenas_cache_get(tsd);
    if (arenas != NULL)
        a0free(arenas_cache);
}

void
narenas_cache_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arenas_cache_bypass_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
@@ -312,44 +600,6 @@ malloc_ncpus(void)
    return ((result == -1) ? 1 : (unsigned)result);
}

void
arenas_cleanup(void *arg)
{
    arena_t *arena = *(arena_t **)arg;

    malloc_mutex_lock(&arenas_lock);
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

    /*
     * TSD initialization can't be safely done as a side effect of
     * deallocation, because it is possible for a thread to do nothing but
     * deallocate its TLS data via free(), in which case writing to TLS
     * would cause write-after-free memory corruption. The quarantine
     * facility *only* gets used as a side effect of deallocation, so make
     * a best effort attempt at initializing its TSD by hooking all
     * allocation events.
     */
    if (config_fill && unlikely(opt_quarantine))
        quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

    if (unlikely(!malloc_initialized) && malloc_init_hard())
        return (true);
    malloc_thread_init();

    return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
@@ -745,7 +995,7 @@ malloc_init_hard(void)
#endif
    malloc_initializer = INITIALIZER;

    if (malloc_tsd_boot()) {
    if (malloc_tsd_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
@@ -809,10 +1059,10 @@ malloc_init_hard(void)

    /*
     * Initialize one arena here. The rest are lazily created in
     * choose_arena_hard().
     * arena_choose_hard().
     */
    arenas_extend(0);
    if (arenas[0] == NULL) {
    a0 = arena_init(0);
    if (a0 == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
@@ -887,6 +1137,7 @@ malloc_init_hard(void)

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);
    malloc_tsd_boot1();

    return (false);
}
@@ -1428,8 +1679,8 @@ JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
 * Begin non-standard functions.
 */

JEMALLOC_ALWAYS_INLINE_C void
imallocx_flags_decode_hard(size_t size, int flags, size_t *usize,
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, bool *try_tcache, arena_t **arena)
{

@@ -1444,16 +1695,19 @@ imallocx_flags_decode_hard(size_t size, int flags, size_t *usize,
    if ((flags & MALLOCX_ARENA_MASK) != 0) {
        unsigned arena_ind = MALLOCX_ARENA_GET(flags);
        *try_tcache = false;
        *arena = arenas[arena_ind];
        *arena = arena_get(tsd, arena_ind, true, true);
        if (unlikely(*arena == NULL))
            return (true);
    } else {
        *try_tcache = true;
        *arena = NULL;
    }
    return (false);
}

JEMALLOC_ALWAYS_INLINE_C void
imallocx_flags_decode(size_t size, int flags, size_t *usize, size_t *alignment,
    bool *zero, bool *try_tcache, arena_t **arena)
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, bool *try_tcache, arena_t **arena)
{

    if (likely(flags == 0)) {
@@ -1463,9 +1717,10 @@ imallocx_flags_decode(size_t size, int flags, size_t *usize, size_t *alignment,
        *zero = false;
        *try_tcache = true;
        *arena = NULL;
        return (false);
    } else {
        imallocx_flags_decode_hard(size, flags, usize, alignment, zero,
            try_tcache, arena);
        return (imallocx_flags_decode_hard(tsd, size, flags, usize,
            alignment, zero, try_tcache, arena));
    }
}

@@ -1524,8 +1779,9 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
    arena_t *arena;
    prof_tctx_t *tctx;

    imallocx_flags_decode(size, flags, usize, &alignment, &zero,
        &try_tcache, &arena);
    if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
        &zero, &try_tcache, &arena)))
        return (NULL);
    tctx = prof_alloc_prep(tsd, *usize, true);
    if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
        p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
@@ -1558,8 +1814,9 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
        return (imalloc(tsd, size));
    }

    imallocx_flags_decode_hard(size, flags, usize, &alignment, &zero,
        &try_tcache, &arena);
    if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
        &alignment, &zero, &try_tcache, &arena)))
        return (NULL);
    return (imallocx_flags(tsd, *usize, alignment, zero, try_tcache,
        arena));
}
@@ -1685,9 +1942,10 @@ je_rallocx(void *ptr, size_t size, int flags)
        arena_chunk_t *chunk;
        try_tcache_alloc = false;
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        try_tcache_dalloc = (chunk == ptr || chunk->arena !=
            arenas[arena_ind]);
        arena = arenas[arena_ind];
        arena = arena_get(tsd, arena_ind, true, true);
        if (unlikely(arena == NULL))
            goto label_oom;
        try_tcache_dalloc = (chunk == ptr || chunk->arena != arena);
    } else {
        try_tcache_alloc = true;
        try_tcache_dalloc = true;
@@ -1825,6 +2083,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)

    if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
        unsigned arena_ind = MALLOCX_ARENA_GET(flags);
        // XX Dangerous arenas read.
        arena = arenas[arena_ind];
    } else
        arena = NULL;
@@ -1875,16 +2134,24 @@ je_sallocx(const void *ptr, int flags)
void
je_dallocx(void *ptr, int flags)
{
    tsd_t *tsd;
    bool try_tcache;

    assert(ptr != NULL);
    assert(malloc_initialized || IS_INITIALIZER);

    tsd = tsd_fetch();
    if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
        unsigned arena_ind = MALLOCX_ARENA_GET(flags);
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        try_tcache = (chunk == ptr || chunk->arena !=
            arenas[arena_ind]);
        arena_t *arena = arena_get(tsd, arena_ind, true, true);
        /*
         * If arena is NULL, the application passed an arena that has
         * never been used before, which is unsupported during
         * deallocation.
         */
        assert(arena != NULL);
        try_tcache = (chunk == ptr || chunk->arena != arena);
    } else
        try_tcache = true;

@@ -1908,6 +2175,7 @@ inallocx(size_t size, int flags)
void
je_sdallocx(void *ptr, size_t size, int flags)
{
    tsd_t *tsd;
    bool try_tcache;
    size_t usize;

@@ -1916,16 +2184,22 @@ je_sdallocx(void *ptr, size_t size, int flags)
    usize = inallocx(size, flags);
    assert(usize == isalloc(ptr, config_prof));

    tsd = tsd_fetch();
    if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
        unsigned arena_ind = MALLOCX_ARENA_GET(flags);
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        try_tcache = (chunk == ptr || chunk->arena !=
            arenas[arena_ind]);
        arena_t *arena = arena_get(tsd, arena_ind, true, true);
        /*
         * If arena is NULL, the application passed an arena that has
         * never been used before, which is unsupported during
         * deallocation.
         */
        try_tcache = (chunk == ptr || chunk->arena != arena);
    } else
        try_tcache = true;

    UTRACE(ptr, 0, 0);
    isfree(tsd_fetch(), ptr, usize, try_tcache);
    isfree(tsd, ptr, usize, try_tcache);
}

size_t
@@ -2105,55 +2379,3 @@ jemalloc_postfork_child(void)
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

    if (unlikely(malloc_init()))
        return (NULL);

    if (size == 0)
        size = 1;

    if (size <= arena_maxclass)
        return (arena_malloc(NULL, arenas[0], size, zero, false));
    else
        return (huge_malloc(NULL, arenas[0], size, zero));
}

void *
a0malloc(size_t size)
{

    return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

    return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
    arena_chunk_t *chunk;

    if (ptr == NULL)
        return;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(NULL, chunk, ptr, false);
    else
        huge_dalloc(ptr);
}

/******************************************************************************/