diff --git a/include/jemalloc/internal/pa.h b/include/jemalloc/internal/pa.h
index e1821e69..0df2b4b3 100644
--- a/include/jemalloc/internal/pa.h
+++ b/include/jemalloc/internal/pa.h
@@ -119,7 +119,10 @@ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
     pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx);
 size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 
-edata_t *
-pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
-    bool slab, szind_t szind, bool *zero, size_t *mapped_add);
+edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
+    size_t alignment, bool slab, szind_t szind, bool *zero, size_t *mapped_add);
+/* Returns true on error, in which case nothing changed. */
+bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
+    size_t new_usize, bool *zero, size_t *mapped_add);
+
 #endif /* JEMALLOC_INTERNAL_PA_H */
diff --git a/src/large.c b/src/large.c
index 26a1740c..ff43a8d1 100644
--- a/src/large.c
+++ b/src/large.c
@@ -101,57 +101,28 @@ static bool
 large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
     bool zero) {
 	arena_t *arena = arena_get_from_edata(edata);
-	size_t oldusize = edata_usize_get(edata);
-	ehooks_t *ehooks = arena_get_ehooks(arena);
-	size_t trailsize = usize - oldusize;
-
-	if (ehooks_merge_will_fail(ehooks)) {
-		return true;
-	}
 
 	if (config_fill && unlikely(opt_zero)) {
 		zero = true;
 	}
+
+	size_t old_usize = edata_usize_get(edata);
+
 	/*
 	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
 	 * extent, so that it is possible to make correct zero fill decisions
 	 * below, even if is_zeroed_trail ends up true when zero is false.
 	 */
 	bool is_zeroed_trail = zero;
-	edata_t *trail;
-	bool new_mapping;
-	if ((trail = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
-	    &arena->pa_shard.ecache_dirty, edata_past_get(edata), trailsize,
-	    CACHELINE, false, SC_NSIZES, &is_zeroed_trail)) != NULL
-	    || (trail = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
-	    &arena->pa_shard.ecache_muzzy, edata_past_get(edata), trailsize,
-	    CACHELINE, false, SC_NSIZES, &is_zeroed_trail)) != NULL) {
-		if (config_stats) {
-			new_mapping = false;
-		}
-	} else {
-		if ((trail = ecache_alloc_grow(tsdn, &arena->pa_shard, ehooks,
-		    &arena->pa_shard.ecache_retained, edata_past_get(edata),
-		    trailsize, CACHELINE, false, SC_NSIZES, &is_zeroed_trail))
-		    == NULL) {
-			return true;
-		}
-		if (config_stats) {
-			new_mapping = true;
-		}
-	}
-
-	if (extent_merge_wrapper(tsdn, ehooks, &arena->pa_shard.edata_cache,
-	    edata, trail)) {
-		extent_dalloc_wrapper(tsdn, &arena->pa_shard, ehooks, trail);
+	size_t mapped_add;
+	bool err = pa_expand(tsdn, &arena->pa_shard, edata, usize,
+	    &is_zeroed_trail, &mapped_add);
+	if (err) {
 		return true;
 	}
 
-	szind_t szind = sz_size2index(usize);
-	emap_remap(tsdn, &emap_global, edata, szind, false);
-
-	if (config_stats && new_mapping) {
-		pa_shard_stats_mapped_add(tsdn, &arena->pa_shard, trailsize);
+	if (config_stats && mapped_add > 0) {
+		pa_shard_stats_mapped_add(tsdn, &arena->pa_shard, mapped_add);
 	}
 
 	if (zero) {
@@ -164,7 +135,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 		 * of CACHELINE in [0 .. PAGE).
 		 */
 		void *zbase = (void *)
-		    ((uintptr_t)edata_addr_get(edata) + oldusize);
+		    ((uintptr_t)edata_addr_get(edata) + old_usize);
 		void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + PAGE));
 		size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
 
@@ -173,7 +144,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 		}
 		assert(is_zeroed_trail);
 	}
-	arena_extent_ralloc_large_expand(tsdn, arena, edata, oldusize);
+	arena_extent_ralloc_large_expand(tsdn, arena, edata, old_usize);
 	return false;
 }
 
diff --git a/src/pa.c b/src/pa.c
index 0dbf0445..7c3b568a 100644
--- a/src/pa.c
+++ b/src/pa.c
@@ -94,3 +94,43 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	}
 	return edata;
 }
+
+bool
+pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t new_usize,
+    bool *zero, size_t *mapped_add) {
+	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
+	size_t old_usize = edata_usize_get(edata);
+	size_t trail_size = new_usize - old_usize;
+	void *trail_begin = edata_past_get(edata);
+
+	*mapped_add = 0;
+	if (ehooks_merge_will_fail(ehooks)) {
+		return true;
+	}
+	edata_t *trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_dirty,
+	    trail_begin, trail_size, PAGE, /* slab */ false, SC_NSIZES, zero);
+	if (trail == NULL) {
+		trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
+		    trail_begin, trail_size, PAGE, /* slab */ false, SC_NSIZES,
+		    zero);
+	}
+	if (trail == NULL) {
+		trail = ecache_alloc_grow(tsdn, shard, ehooks,
+		    &shard->ecache_retained, trail_begin, trail_size, PAGE,
+		    /* slab */ false, SC_NSIZES, zero);
+		*mapped_add = trail_size;
+	}
+	if (trail == NULL) {
+		*mapped_add = 0;
+		return true;
+	}
+	if (extent_merge_wrapper(tsdn, ehooks, &shard->edata_cache, edata,
+	    trail)) {
+		extent_dalloc_wrapper(tsdn, shard, ehooks, trail);
+		*mapped_add = 0;
+		return true;
+	}
+	szind_t szind = sz_size2index(new_usize);
+	emap_remap(tsdn, &emap_global, edata, szind, /* slab */ false);
+	return false;
+}
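
Note on the contract introduced by this patch: pa_expand() returns true on failure and in that case leaves the edata untouched; *mapped_add is nonzero only when the trail extent had to come from the retained/grow path (i.e., new pages were mapped), which is what the caller should report to the mapped stat. The sketch below is a hypothetical caller mirroring the updated large_ralloc_no_move_expand() above -- grow_in_place() is an illustrative name, not part of the patch; all other identifiers are taken from the diff.

	/*
	 * Hypothetical pa_expand() caller, for illustration only; compare
	 * with large_ralloc_no_move_expand() in src/large.c above.
	 */
	static bool
	grow_in_place(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
	    size_t new_usize, bool zero) {
		/*
		 * Pass a copy of zero: pa_expand() may set it true even when
		 * the caller did not request zeroed memory.
		 */
		bool is_zeroed_trail = zero;
		size_t mapped_add;
		if (pa_expand(tsdn, shard, edata, new_usize, &is_zeroed_trail,
		    &mapped_add)) {
			/* True means failure; the extent was left unchanged. */
			return true;
		}
		/* Nonzero only when the retained/grow path mapped new pages. */
		if (config_stats && mapped_add > 0) {
			pa_shard_stats_mapped_add(tsdn, shard, mapped_add);
		}
		return false;
	}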