diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h
index 79478136..eb82e716 100644
--- a/include/jemalloc/internal/arena_inlines_b.h
+++ b/include/jemalloc/internal/arena_inlines_b.h
@@ -188,15 +188,11 @@ arena_aalloc(tsdn_t *tsdn, const void *ptr) {
 JEMALLOC_ALWAYS_INLINE size_t
 arena_salloc(tsdn_t *tsdn, const void *ptr) {
 	assert(ptr != NULL);
+	alloc_ctx_t alloc_ctx;
+	emap_alloc_info_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
+	assert(alloc_ctx.szind != SC_NSIZES);
 
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	szind_t szind = rtree_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
-	    (uintptr_t)ptr, true);
-	assert(szind != SC_NSIZES);
-
-	return sz_index2size(szind);
+	return sz_index2size(alloc_ctx.szind);
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -210,26 +206,24 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
 	 *   failure.
 	 */
 
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	edata_t *edata;
-	szind_t szind;
-	if (rtree_edata_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
-	    (uintptr_t)ptr, false, &edata, &szind)) {
+	emap_full_alloc_ctx_t full_alloc_ctx;
+	bool missing = emap_full_alloc_info_try_lookup(tsdn, &emap_global, ptr,
+	    &full_alloc_ctx);
+	if (missing) {
 		return 0;
 	}
 
-	if (edata == NULL) {
+	if (full_alloc_ctx.edata == NULL) {
 		return 0;
 	}
-	assert(edata_state_get(edata) == extent_state_active);
+	assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active);
 	/* Only slab members should be looked up via interior pointers. */
-	assert(edata_addr_get(edata) == ptr || edata_slab_get(edata));
+	assert(edata_addr_get(full_alloc_ctx.edata) == ptr
+	    || edata_slab_get(full_alloc_ctx.edata));
 
-	assert(szind != SC_NSIZES);
+	assert(full_alloc_ctx.szind != SC_NSIZES);
 
-	return sz_index2size(szind);
+	return sz_index2size(full_alloc_ctx.szind);
 }
 
 static inline void
@@ -246,27 +240,21 @@ static inline void
 arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
 	assert(ptr != NULL);
 
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	szind_t szind;
-	bool slab;
-	rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
-	    (uintptr_t)ptr, true, &szind, &slab);
+	alloc_ctx_t alloc_ctx;
+	emap_alloc_info_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
 
 	if (config_debug) {
-		edata_t *edata = rtree_edata_read(tsdn, &emap_global.rtree,
-		    rtree_ctx, (uintptr_t)ptr, true);
-		assert(szind == edata_szind_get(edata));
-		assert(szind < SC_NSIZES);
-		assert(slab == edata_slab_get(edata));
+		edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
+		assert(alloc_ctx.szind == edata_szind_get(edata));
+		assert(alloc_ctx.szind < SC_NSIZES);
+		assert(alloc_ctx.slab == edata_slab_get(edata));
 	}
 
-	if (likely(slab)) {
+	if (likely(alloc_ctx.slab)) {
 		/* Small allocation. */
 		arena_dalloc_small(tsdn, ptr);
 	} else {
-		arena_dalloc_large_no_tcache(tsdn, ptr, szind);
+		arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
 	}
 }
 
@@ -288,7 +276,7 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
 
 JEMALLOC_ALWAYS_INLINE void
 arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
-    alloc_ctx_t *alloc_ctx, bool slow_path) {
+    alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
 	assert(!tsdn_null(tsdn) || tcache == NULL);
 	assert(ptr != NULL);
 
@@ -297,34 +285,28 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 		return;
 	}
 
-	szind_t szind;
-	bool slab;
-	rtree_ctx_t *rtree_ctx;
-	if (alloc_ctx != NULL) {
-		szind = alloc_ctx->szind;
-		slab = alloc_ctx->slab;
-		assert(szind != SC_NSIZES);
+	alloc_ctx_t alloc_ctx;
+	if (caller_alloc_ctx != NULL) {
+		alloc_ctx = *caller_alloc_ctx;
 	} else {
-		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
-		rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
-		    (uintptr_t)ptr, true, &szind, &slab);
+		util_assume(!tsdn_null(tsdn));
+		emap_alloc_info_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
 	}
 
 	if (config_debug) {
-		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
-		edata_t *edata = rtree_edata_read(tsdn, &emap_global.rtree,
-		    rtree_ctx, (uintptr_t)ptr, true);
-		assert(szind == edata_szind_get(edata));
-		assert(szind < SC_NSIZES);
-		assert(slab == edata_slab_get(edata));
+		edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
+		assert(alloc_ctx.szind == edata_szind_get(edata));
+		assert(alloc_ctx.szind < SC_NSIZES);
+		assert(alloc_ctx.slab == edata_slab_get(edata));
 	}
 
-	if (likely(slab)) {
+	if (likely(alloc_ctx.slab)) {
 		/* Small allocation. */
-		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
-		    slow_path);
+		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+		    alloc_ctx.szind, slow_path);
 	} else {
-		arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
+		arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
+		    slow_path);
 	}
 }
 
@@ -333,47 +315,41 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 	assert(ptr != NULL);
 	assert(size <= SC_LARGE_MAXCLASS);
 
-	szind_t szind;
-	bool slab;
+	alloc_ctx_t alloc_ctx;
 	if (!config_prof || !opt_prof) {
 		/*
 		 * There is no risk of being confused by a promoted sampled
 		 * object, so base szind and slab on the given size.
 		 */
-		szind = sz_size2index(size);
-		slab = (szind < SC_NBINS);
+		alloc_ctx.szind = sz_size2index(size);
+		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
 	}
 
 	if ((config_prof && opt_prof) || config_debug) {
-		rtree_ctx_t rtree_ctx_fallback;
-		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
-		    &rtree_ctx_fallback);
+		emap_alloc_info_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
 
-		rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
-		    (uintptr_t)ptr, true, &szind, &slab);
-
-		assert(szind == sz_size2index(size));
-		assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
+		assert(alloc_ctx.szind == sz_size2index(size));
+		assert((config_prof && opt_prof)
+		    || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
 
 		if (config_debug) {
-			edata_t *edata = rtree_edata_read(tsdn,
-			    &emap_global.rtree, rtree_ctx, (uintptr_t)ptr, true);
-			assert(szind == edata_szind_get(edata));
-			assert(slab == edata_slab_get(edata));
+			edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
+			assert(alloc_ctx.szind == edata_szind_get(edata));
+			assert(alloc_ctx.slab == edata_slab_get(edata));
 		}
 	}
 
-	if (likely(slab)) {
+	if (likely(alloc_ctx.slab)) {
 		/* Small allocation. */
 		arena_dalloc_small(tsdn, ptr);
 	} else {
-		arena_dalloc_large_no_tcache(tsdn, ptr, szind);
+		arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
 	}
 }
 
 JEMALLOC_ALWAYS_INLINE void
 arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    alloc_ctx_t *alloc_ctx, bool slow_path) {
+    alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
 	assert(!tsdn_null(tsdn) || tcache == NULL);
 	assert(ptr != NULL);
 	assert(size <= SC_LARGE_MAXCLASS);
@@ -383,48 +359,38 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 		return;
 	}
 
-	szind_t szind;
-	bool slab;
-	alloc_ctx_t local_ctx;
+	alloc_ctx_t alloc_ctx;
 	if (config_prof && opt_prof) {
-		if (alloc_ctx == NULL) {
+		if (caller_alloc_ctx == NULL) {
 			/* Uncommon case and should be a static check. */
-			rtree_ctx_t rtree_ctx_fallback;
-			rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
-			    &rtree_ctx_fallback);
-			rtree_szind_slab_read(tsdn, &emap_global.rtree,
-			    rtree_ctx, (uintptr_t)ptr, true, &local_ctx.szind,
-			    &local_ctx.slab);
-			assert(local_ctx.szind == sz_size2index(size));
-			alloc_ctx = &local_ctx;
+			emap_alloc_info_lookup(tsdn, &emap_global, ptr,
+			    &alloc_ctx);
+			assert(alloc_ctx.szind == sz_size2index(size));
+		} else {
+			alloc_ctx = *caller_alloc_ctx;
 		}
-		slab = alloc_ctx->slab;
-		szind = alloc_ctx->szind;
 	} else {
 		/*
 		 * There is no risk of being confused by a promoted sampled
 		 * object, so base szind and slab on the given size.
 		 */
-		szind = sz_size2index(size);
-		slab = (szind < SC_NBINS);
+		alloc_ctx.szind = sz_size2index(size);
+		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
 	}
 
 	if (config_debug) {
-		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
-		rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
-		    (uintptr_t)ptr, true, &szind, &slab);
-		edata_t *edata = rtree_edata_read(tsdn,
-		    &emap_global.rtree, rtree_ctx, (uintptr_t)ptr, true);
-		assert(szind == edata_szind_get(edata));
-		assert(slab == edata_slab_get(edata));
+		edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
+		assert(alloc_ctx.szind == edata_szind_get(edata));
+		assert(alloc_ctx.slab == edata_slab_get(edata));
 	}
 
-	if (likely(slab)) {
+	if (likely(alloc_ctx.slab)) {
 		/* Small allocation. */
-		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
-		    slow_path);
+		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+		    alloc_ctx.szind, slow_path);
 	} else {
-		arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
+		arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
+		    slow_path);
 	}
 }
 
diff --git a/include/jemalloc/internal/emap.h b/include/jemalloc/internal/emap.h
index 3a8182d3..89bb9684 100644
--- a/include/jemalloc/internal/emap.h
+++ b/include/jemalloc/internal/emap.h
@@ -18,6 +18,13 @@ struct alloc_ctx_t {
 	bool slab;
 };
 
+typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t;
+struct emap_full_alloc_ctx_s {
+	szind_t szind;
+	bool slab;
+	edata_t *edata;
+};
+
 extern emap_t emap_global;
 
 bool emap_init(emap_t *emap);
@@ -136,7 +143,7 @@ emap_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
 
 /* Fills in alloc_ctx with the info in the map. */
 JEMALLOC_ALWAYS_INLINE void
-emap_alloc_info_lookup(tsdn_t *tsdn, emap_t *emap, void *ptr,
+emap_alloc_info_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
     alloc_ctx_t *alloc_ctx) {
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -145,6 +152,34 @@ emap_alloc_info_lookup(tsdn_t *tsdn, emap_t *emap, void *ptr,
 	    true, &alloc_ctx->szind, &alloc_ctx->slab);
 }
 
+/* The pointer must be mapped. */
+JEMALLOC_ALWAYS_INLINE void
+emap_full_alloc_info_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
+    emap_full_alloc_ctx_t *full_alloc_ctx) {
+	rtree_ctx_t rtree_ctx_fallback;
+	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+	rtree_edata_szind_slab_read(tsdn, &emap->rtree, rtree_ctx,
+	    (uintptr_t)ptr, true, &full_alloc_ctx->edata,
+	    &full_alloc_ctx->szind, &full_alloc_ctx->slab);
+}
+
+/*
+ * The pointer is allowed to not be mapped.
+ *
+ * Returns true when the pointer is not present.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+emap_full_alloc_info_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
+    emap_full_alloc_ctx_t *full_alloc_ctx) {
+	rtree_ctx_t rtree_ctx_fallback;
+	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+	return rtree_edata_szind_slab_read(tsdn, &emap->rtree, rtree_ctx,
+	    (uintptr_t)ptr, false, &full_alloc_ctx->edata,
+	    &full_alloc_ctx->szind, &full_alloc_ctx->slab);
+}
+
 /*
  * Fills in alloc_ctx, but only if it can be done easily (i.e. with a hit in the
  * L1 rtree cache.
@@ -152,7 +187,7 @@ emap_alloc_info_lookup(tsdn_t *tsdn, emap_t *emap, void *ptr,
  * Returns whether or not alloc_ctx was filled in.
  */
 JEMALLOC_ALWAYS_INLINE bool
-emap_alloc_info_try_lookup_fast(tsd_t *tsd, emap_t *emap, void *ptr,
+emap_alloc_info_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
     alloc_ctx_t *alloc_ctx) {
 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
 	bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &emap->rtree,
diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h
index 339c7e5e..11a52ed0 100644
--- a/include/jemalloc/internal/rtree.h
+++ b/include/jemalloc/internal/rtree.h
@@ -440,15 +440,24 @@ rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
  */
 
 JEMALLOC_ALWAYS_INLINE bool
-rtree_edata_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
-    uintptr_t key, bool dependent, edata_t **r_edata, szind_t *r_szind) {
+rtree_edata_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree,
+    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, edata_t **r_edata,
+    szind_t *r_szind, bool *r_slab) {
 	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
 	    dependent);
 	if (!dependent && elm == NULL) {
 		return true;
 	}
+#ifdef RTREE_LEAF_COMPACT
+	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+	*r_edata = rtree_leaf_elm_bits_edata_get(bits);
+	*r_szind = rtree_leaf_elm_bits_szind_get(bits);
+	*r_slab = rtree_leaf_elm_bits_slab_get(bits);
+#else
 	*r_edata = rtree_leaf_elm_edata_read(tsdn, rtree, elm, dependent);
 	*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+	*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
+#endif
 	return false;
 }
 
diff --git a/include/jemalloc/internal/util.h b/include/jemalloc/internal/util.h
index 304cb545..cb751479 100644
--- a/include/jemalloc/internal/util.h
+++ b/include/jemalloc/internal/util.h
@@ -62,6 +62,13 @@ get_errno(void) {
 #endif
 }
 
+JEMALLOC_ALWAYS_INLINE void
+util_assume(bool b) {
+	if (!b) {
+		unreachable();
+	}
+}
+
 #undef UTIL_INLINE
 
 #endif /* JEMALLOC_INTERNAL_UTIL_H */
diff --git a/src/arena.c b/src/arena.c
index 2df7df6e..b2a0ac76 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1111,10 +1111,8 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
 
 		alloc_ctx_t alloc_ctx;
-		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
-		rtree_szind_slab_read(tsd_tsdn(tsd), &emap_global.rtree,
-		    rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind,
-		    &alloc_ctx.slab);
+		emap_alloc_info_lookup(tsd_tsdn(tsd), &emap_global, ptr,
+		    &alloc_ctx);
 		assert(alloc_ctx.szind != SC_NSIZES);
 
 		if (config_stats || (config_prof && opt_prof)) {
diff --git a/src/tcache.c b/src/tcache.c
index e9331d03..b7c0a549 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -114,10 +114,10 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 
 /* Enabled with --enable-extra-size-check. */
 static void
-tbin_edatas_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
-    size_t nflush, edata_t **edatas){
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+tbin_edatas_lookup_size_check(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
+    size_t nflush, edata_t **edatas) {
+	/* Avoids null-checking tsdn in the loop below. */
+	util_assume(tsd != NULL);
 
 	/*
 	 * Verify that the items in the tcache all have the correct size; this
@@ -125,16 +125,16 @@ tbin_edatas_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
 	 * instead of corrupting metadata. Since this can be turned on for opt
 	 * builds, avoid the branch in the loop.
 	 */
-	szind_t szind;
-	size_t sz_sum = binind * nflush;
+	size_t szind_sum = binind * nflush;
 	void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
 	for (unsigned i = 0 ; i < nflush; i++) {
-		rtree_edata_szind_read(tsdn, &emap_global.rtree,
-		    rtree_ctx, (uintptr_t)*(bottom_item - i), true,
-		    &edatas[i], &szind);
-		sz_sum -= szind;
+		emap_full_alloc_ctx_t full_alloc_ctx;
+		emap_full_alloc_info_lookup(tsd_tsdn(tsd), &emap_global,
+		    *(bottom_item - i), &full_alloc_ctx);
+		edatas[i] = full_alloc_ctx.edata;
+		szind_sum -= full_alloc_ctx.szind;
 	}
-	if (sz_sum != 0) {
+	if (szind_sum != 0) {
 		safety_check_fail_sized_dealloc(false);
 	}
 }
@@ -156,7 +156,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	tsdn_t *tsdn = tsd_tsdn(tsd);
 	/* Look up edata once per item. */
 	if (config_opt_safety_checks) {
-		tbin_edatas_lookup_size_check(tsdn, tbin, binind, nflush,
+		tbin_edatas_lookup_size_check(tsd, tbin, binind, nflush,
 		    item_edata);
 	} else {
 		for (unsigned i = 0 ; i < nflush; i++) {
diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c
index a1f1d07c..7fbde0b6 100644
--- a/test/unit/arena_reset.c
+++ b/test/unit/arena_reset.c
@@ -60,28 +60,25 @@ get_large_size(size_t ind) {
 /* Like ivsalloc(), but safe to call on discarded allocations. */
 static size_t
 vsalloc(tsdn_t *tsdn, const void *ptr) {
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	edata_t *edata;
-	szind_t szind;
-	if (rtree_edata_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
-	    (uintptr_t)ptr, false, &edata, &szind)) {
+	emap_full_alloc_ctx_t full_alloc_ctx;
+	bool missing = emap_full_alloc_info_try_lookup(tsdn, &emap_global,
+	    ptr, &full_alloc_ctx);
+	if (missing) {
 		return 0;
 	}
 
-	if (edata == NULL) {
+	if (full_alloc_ctx.edata == NULL) {
 		return 0;
 	}
-	if (edata_state_get(edata) != extent_state_active) {
+	if (edata_state_get(full_alloc_ctx.edata) != extent_state_active) {
 		return 0;
 	}
 
-	if (szind == SC_NSIZES) {
+	if (full_alloc_ctx.szind == SC_NSIZES) {
 		return 0;
 	}
 
-	return sz_index2size(szind);
+	return sz_index2size(full_alloc_ctx.szind);
 }
 
 static unsigned