Mirror of https://github.com/jemalloc/jemalloc.git
Renaming limit_usize_gap to disable_large_size_classes
commit 1cc32aef65
parent 51895c03e5
18 changed files with 78 additions and 68 deletions
@@ -291,12 +291,12 @@ static inline size_t
 edata_usize_get(const edata_t *edata) {
     assert(edata != NULL);
     /*
-     * When sz_limit_usize_gap_enabled() is true, two cases:
+     * When sz_large_size_classes_disabled() is true, two cases:
     * 1. if usize_from_ind is not smaller than SC_LARGE_MINCLASS,
     * usize_from_size is accurate;
     * 2. otherwise, usize_from_ind is accurate.
     *
-     * When sz_limit_usize_gap_enabled() is not true, the two should be the
+     * When sz_large_size_classes_disabled() is not true, the two should be the
     * same when usize_from_ind is not smaller than SC_LARGE_MINCLASS.
     *
     * Note sampled small allocs will be promoted. Their extent size is
@@ -316,9 +316,9 @@ edata_usize_get(const edata_t *edata) {
     }
 #endif

-    if (!sz_limit_usize_gap_enabled() || szind < SC_NBINS) {
+    if (!sz_large_size_classes_disabled() || szind < SC_NBINS) {
         size_t usize_from_ind = sz_index2size(szind);
-        if (!sz_limit_usize_gap_enabled() &&
+        if (!sz_large_size_classes_disabled() &&
             usize_from_ind >= SC_LARGE_MINCLASS) {
             size_t size = (edata->e_size_esn & EDATA_SIZE_MASK);
             assert(size > sz_large_pad);
@@ -332,8 +332,8 @@ edata_usize_get(const edata_t *edata) {
     assert(size > sz_large_pad);
     size_t usize_from_size = size - sz_large_pad;
     /*
-     * no matter limit-usize-gap enabled or not, usize retrieved from size
-     * is not accurate when smaller than SC_LARGE_MINCLASS.
+     * no matter large size classes disabled or not, usize retrieved from
+     * size is not accurate when smaller than SC_LARGE_MINCLASS.
     */
     assert(usize_from_size >= SC_LARGE_MINCLASS);
     return usize_from_size;
@@ -237,7 +237,7 @@ emap_alloc_ctx_init(emap_alloc_ctx_t *alloc_ctx, szind_t szind, bool slab,
     alloc_ctx->szind = szind;
     alloc_ctx->slab = slab;
     alloc_ctx->usize = usize;
-    assert(sz_limit_usize_gap_enabled() ||
+    assert(sz_large_size_classes_disabled() ||
         usize == sz_index2size(szind));
 }

@@ -248,7 +248,7 @@ emap_alloc_ctx_usize_get(emap_alloc_ctx_t *alloc_ctx) {
         assert(alloc_ctx->usize == sz_index2size(alloc_ctx->szind));
         return sz_index2size(alloc_ctx->szind);
     }
-    assert(sz_limit_usize_gap_enabled() ||
+    assert(sz_large_size_classes_disabled() ||
         alloc_ctx->usize == sz_index2size(alloc_ctx->szind));
     assert(alloc_ctx->usize <= SC_LARGE_MAXCLASS);
     return alloc_ctx->usize;
@@ -39,7 +39,7 @@ extern atomic_zu_t zero_realloc_count;
 extern bool opt_cache_oblivious;
 extern unsigned opt_debug_double_free_max_scan;
 extern size_t opt_calloc_madvise_threshold;
-extern bool opt_limit_usize_gap;
+extern bool opt_disable_large_size_classes;

 extern const char *opt_malloc_conf_symlink;
 extern const char *opt_malloc_conf_env_var;
@@ -287,11 +287,11 @@
 #endif

 /*
- * When limit_usize_gap is enabled, the gaps between two contiguous
- * size classes should not exceed PAGE. This means there should be no concept
- * of size classes for sizes > SC_SMALL_MAXCLASS (or >= SC_LARGE_MINCLASS).
- * However, between SC_LARGE_MINCLASS (SC_NGROUP * PAGE) and
- * 2 * SC_NGROUP * PAGE, the size class also happens to be aligned with PAGE.
+ * When large size classes are disabled, there is no concept of size classes
+ * for sizes > SC_SMALL_MAXCLASS (or >= SC_LARGE_MINCLASS). This ensures that
+ * the overhead between the usable size and the user request size will not
+ * exceed PAGE. Between SC_LARGE_MINCLASS (SC_NGROUP * PAGE) and
+ * 2 * SC_NGROUP * PAGE, the size classes also happen to be aligned with PAGE.
 * Since tcache relies on size classes to work and it greatly increases the
 * perf of allocs & deallocs, we extend the existence of size class to
 * 2 * SC_NGROUP * PAGE ONLY for the tcache module. This means for all other
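To make the PAGE-granularity behavior described above concrete, here is a small standalone C sketch. It is illustration only, not jemalloc code: the constants PAGE = 4 KiB and NGROUP = 4 and both helper functions are assumptions made for the example. It contrasts conventional size-class rounding with the page-ceiling used when large size classes are disabled, for the 1.1MB/1.15MB/1.2MB requests discussed in the src/pac.c comment further down.

#include <stdio.h>
#include <stddef.h>

/* Illustration-only assumptions: PAGE = 4 KiB, 4 size classes per doubling. */
#define PAGE    ((size_t)4096)
#define NGROUP  ((size_t)4)

/* Classic rounding: ceil to the next size class (spacing = group base / NGROUP). */
static size_t
usize_with_size_classes(size_t size) {
    size_t bound = PAGE * NGROUP;       /* smallest "large" group boundary */
    while (bound < size) {
        bound *= 2;
    }
    size_t delta = bound / 2 / NGROUP;  /* class spacing within this group */
    return ((size + delta - 1) / delta) * delta;
}

/* With large size classes disabled: simply ceil to a multiple of PAGE. */
static size_t
usize_page_ceiling(size_t size) {
    return ((size + PAGE - 1) / PAGE) * PAGE;
}

int
main(void) {
    size_t reqs[] = {1100 * 1024, 1150 * 1024, 1200 * 1024};
    for (int i = 0; i < 3; i++) {
        /* All three class-round to 1.25 MiB; page-ceiling keeps them apart. */
        printf("request %zu -> class-rounded %zu, page-ceiled %zu\n",
            reqs[i], usize_with_size_classes(reqs[i]),
            usize_page_ceiling(reqs[i]));
    }
    return 0;
}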
@@ -55,8 +55,8 @@ extern size_t sz_large_pad;
 extern void sz_boot(const sc_data_t *sc_data, bool cache_oblivious);

 JEMALLOC_ALWAYS_INLINE bool
-sz_limit_usize_gap_enabled() {
-    return opt_limit_usize_gap;
+sz_large_size_classes_disabled() {
+    return opt_disable_large_size_classes;
 }

 JEMALLOC_ALWAYS_INLINE pszind_t
@@ -269,11 +269,11 @@ sz_index2size_unsafe(szind_t index) {

 JEMALLOC_ALWAYS_INLINE size_t
 sz_index2size(szind_t index) {
-    assert(!sz_limit_usize_gap_enabled() ||
+    assert(!sz_large_size_classes_disabled() ||
         index <= sz_size2index(USIZE_GROW_SLOW_THRESHOLD));
     size_t size = sz_index2size_unsafe(index);
     /*
-     * With limit_usize_gap enabled, the usize above
+     * With large size classes disabled, the usize above
     * SC_LARGE_MINCLASS should grow by PAGE. However, for sizes
     * in [SC_LARGE_MINCLASS, USIZE_GROW_SLOW_THRESHOLD], the
     * usize would not change because the size class gap in this
@@ -285,7 +285,7 @@ sz_index2size(szind_t index) {
     * the size is no larger than USIZE_GROW_SLOW_THRESHOLD here
     * instead of SC_LARGE_MINCLASS.
     */
-    assert(!sz_limit_usize_gap_enabled() ||
+    assert(!sz_large_size_classes_disabled() ||
         size <= USIZE_GROW_SLOW_THRESHOLD);
     return size;
 }
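The assertions above rest on the observation from the comment in sz.h: between SC_LARGE_MINCLASS (SC_NGROUP * PAGE) and 2 * SC_NGROUP * PAGE the class spacing is already exactly PAGE, so sz_index2size() stays consistent with page-ceiled usizes in that range. A quick standalone check of that arithmetic, under the assumed values PAGE = 4 KiB and SC_NGROUP = 4 (not jemalloc code):

#include <stdio.h>
#include <stddef.h>

/* Illustration-only assumptions: PAGE = 4 KiB, SC_NGROUP = 4. */
#define PAGE    ((size_t)4096)
#define NGROUP  ((size_t)4)

int
main(void) {
    /*
     * The group starting at SC_LARGE_MINCLASS (= NGROUP * PAGE = 16 KiB) ends
     * at 2 * NGROUP * PAGE (= 32 KiB) and contains NGROUP classes, so the
     * spacing is (NGROUP * PAGE) / NGROUP = PAGE: every class in this range
     * is already a multiple of PAGE and exactly one PAGE apart.
     */
    size_t large_minclass = NGROUP * PAGE;   /* 16 KiB */
    size_t group_end = 2 * NGROUP * PAGE;    /* 32 KiB */
    size_t delta = large_minclass / NGROUP;  /* = PAGE */
    for (size_t sz = large_minclass + delta; sz <= group_end; sz += delta) {
        printf("class %zu, step %zu, multiple of PAGE: %s\n", sz, delta,
            (sz % PAGE == 0) ? "yes" : "no");
    }
    return 0;
}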
@@ -335,11 +335,11 @@ sz_s2u_compute(size_t size) {
             (ZU(1) << lg_ceil));
     }
 #endif
-    if (size <= SC_SMALL_MAXCLASS || !sz_limit_usize_gap_enabled()) {
+    if (size <= SC_SMALL_MAXCLASS || !sz_large_size_classes_disabled()) {
         return sz_s2u_compute_using_delta(size);
     } else {
         /*
-         * With sz_limit_usize_gap_enabled() == true, usize of a large
+         * With sz_large_size_classes_disabled() == true, usize of a large
         * allocation is calculated by ceiling size to the smallest
         * multiple of PAGE to minimize the memory overhead, especially
         * when using hugepages.
@@ -170,7 +170,7 @@ CTL_PROTO(opt_prof_sys_thread_name)
 CTL_PROTO(opt_prof_time_res)
 CTL_PROTO(opt_lg_san_uaf_align)
 CTL_PROTO(opt_zero_realloc)
-CTL_PROTO(opt_limit_usize_gap)
+CTL_PROTO(opt_disable_large_size_classes)
 CTL_PROTO(opt_process_madvise_max_batch)
 CTL_PROTO(opt_malloc_conf_symlink)
 CTL_PROTO(opt_malloc_conf_env_var)
@@ -564,7 +564,7 @@ static const ctl_named_node_t opt_node[] = {
     {NAME("zero_realloc"), CTL(opt_zero_realloc)},
     {NAME("debug_double_free_max_scan"),
         CTL(opt_debug_double_free_max_scan)},
-    {NAME("limit_usize_gap"), CTL(opt_limit_usize_gap)},
+    {NAME("disable_large_size_classes"), CTL(opt_disable_large_size_classes)},
     {NAME("process_madvise_max_batch"), CTL(opt_process_madvise_max_batch)},
     {NAME("malloc_conf"), CHILD(named, opt_malloc_conf)}
 };
@@ -2355,7 +2355,7 @@ CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align,
     opt_lg_san_uaf_align, ssize_t)
 CTL_RO_NL_GEN(opt_zero_realloc,
     zero_realloc_mode_names[opt_zero_realloc_action], const char *)
-CTL_RO_NL_GEN(opt_limit_usize_gap, opt_limit_usize_gap, bool)
+CTL_RO_NL_GEN(opt_disable_large_size_classes, opt_disable_large_size_classes, bool)

 /* malloc_conf options */
 CTL_RO_NL_CGEN(opt_malloc_conf_symlink, opt_malloc_conf_symlink,
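With the "opt.disable_large_size_classes" node registered above, the setting can be read back at runtime through the standard mallctl() interface. A minimal sketch, assuming an unprefixed jemalloc build that already includes this commit:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    bool disabled;
    size_t len = sizeof(disabled);
    /* Read-only boolean option added by this commit. */
    if (mallctl("opt.disable_large_size_classes", &disabled, &len, NULL, 0)
        != 0) {
        fprintf(stderr, "mallctl failed (jemalloc without this option?)\n");
        return 1;
    }
    printf("opt.disable_large_size_classes: %s\n",
        disabled ? "true" : "false");
    return 0;
}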
src/eset.c (22 changed lines)
@@ -232,7 +232,7 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,

     /* See comments in eset_first_fit for why we enumerate search below. */
     pszind_t pind_prev = sz_psz2ind(sz_psz_quantize_floor(min_size));
-    if (sz_limit_usize_gap_enabled() && pind != pind_prev) {
+    if (sz_large_size_classes_disabled() && pind != pind_prev) {
         edata_t *ret = NULL;
         ret = eset_enumerate_alignment_search(eset, min_size, pind_prev,
             alignment);
@@ -287,7 +287,7 @@ eset_first_fit(eset_t *eset, size_t size, bool exact_only,
     pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));

     if (exact_only) {
-        if (sz_limit_usize_gap_enabled()) {
+        if (sz_large_size_classes_disabled()) {
             pszind_t pind_prev =
                 sz_psz2ind(sz_psz_quantize_floor(size));
             return eset_enumerate_search(eset, size, pind_prev,
@@ -300,28 +300,28 @@ eset_first_fit(eset_t *eset, size_t size, bool exact_only,

     /*
     * Each element in the eset->bins is a heap corresponding to a size
-     * class. When sz_limit_usize_gap_enabled() is false, all heaps after
+     * class. When sz_large_size_classes_disabled() is false, all heaps after
     * pind (including pind itself) will surely satisfy the rquests while
     * heaps before pind cannot satisfy the request because usize is
     * calculated based on size classes then. However, when
-     * sz_limit_usize_gap_enabled() is true, usize is calculated by ceiling
-     * user requested size to the closest multiple of PAGE. This means in
-     * the heap before pind, i.e., pind_prev, there may exist extents able
-     * to satisfy the request and we should enumerate the heap when
-     * pind_prev != pind.
+     * sz_large_size_classes_disabled() is true, usize is calculated by
+     * ceiling user requested size to the closest multiple of PAGE. This
+     * means in the heap before pind, i.e., pind_prev, there may exist
+     * extents able to satisfy the request and we should enumerate the heap
+     * when pind_prev != pind.
     *
     * For example, when PAGE=4KB and the user requested size is 1MB + 4KB,
-     * usize would be 1.25MB when sz_limit_usize_gap_enabled() is false.
+     * usize would be 1.25MB when sz_large_size_classes_disabled() is false.
     * pind points to the heap containing extents ranging in
     * [1.25MB, 1.5MB). Thus, searching starting from pind will not miss
-     * any candidates. When sz_limit_usize_gap_enabled() is true, the
+     * any candidates. When sz_large_size_classes_disabled() is true, the
     * usize would be 1MB + 4KB and pind still points to the same heap.
     * In this case, the heap pind_prev points to, which contains extents
     * in the range [1MB, 1.25MB), may contain candidates satisfying the
     * usize and thus should be enumerated.
     */
     pszind_t pind_prev = sz_psz2ind(sz_psz_quantize_floor(size));
-    if (sz_limit_usize_gap_enabled() && pind != pind_prev){
+    if (sz_large_size_classes_disabled() && pind != pind_prev){
         ret = eset_enumerate_search(eset, size, pind_prev,
             /* exact_only */ false, &ret_summ);
     }
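The 1MB + 4KB example in the comment above can be reproduced with a toy model. This is not jemalloc's real sz_psz2ind()/quantize machinery: the bin boundaries (1MB, 1.25MB, 1.5MB) and both toy functions are assumptions chosen to match the comment's PAGE = 4 KiB example. It shows why the bin below pind must also be enumerated once usize is only page-ceiled.

#include <stdio.h>
#include <stddef.h>

/* Toy page-size-class boundaries around 1 MiB; in the (1 MiB, 2 MiB] group the
 * assumed spacing is 256 KiB (1 MiB / NGROUP with NGROUP = 4). */
static const size_t bounds[] = {
    1048576,            /* 1 MiB */
    1048576 + 262144,   /* 1.25 MiB */
    1048576 + 524288,   /* 1.5 MiB */
};

/* Bin whose extents are all >= the class-ceiled size (plays the role of pind). */
static int
toy_psz2ind_ceil(size_t size) {
    for (int i = 0; i < 3; i++) {
        if (size <= bounds[i]) {
            return i;
        }
    }
    return 3;
}

/* Bin that the class-floored size falls into (plays the role of pind_prev). */
static int
toy_psz2ind_floor(size_t size) {
    int ind = 0;
    for (int i = 0; i < 3; i++) {
        if (size >= bounds[i]) {
            ind = i;
        }
    }
    return ind;
}

int
main(void) {
    size_t request = 1048576 + 4096;    /* 1 MiB + 4 KiB */
    int pind = toy_psz2ind_ceil(request);
    int pind_prev = toy_psz2ind_floor(request);
    /*
     * pind covers [1.25 MiB, 1.5 MiB); pind_prev covers [1 MiB, 1.25 MiB).
     * An extent of, say, 1 MiB + 8 KiB sits in pind_prev yet can satisfy the
     * page-ceiled request, so pind_prev must be enumerated when it differs
     * from pind.
     */
    printf("pind=%d pind_prev=%d enumerate_prev=%s\n", pind, pind_prev,
        (pind != pind_prev) ? "yes" : "no");
    return 0;
}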
@@ -123,7 +123,12 @@ zero_realloc_action_t opt_zero_realloc_action =

 atomic_zu_t zero_realloc_count = ATOMIC_INIT(0);

-bool opt_limit_usize_gap = true;
+/*
+ * Disable large size classes is now the default behavior in jemalloc.
+ * Although it is configurable in MALLOC_CONF, this is mainly for debugging
+ * purposes and should not be tuned.
+ */
+bool opt_disable_large_size_classes = true;

 const char *const zero_realloc_mode_names[] = {
     "alloc",
@@ -1780,8 +1785,14 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
                 "san_guard_large", 0, SIZE_T_MAX,
                 CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)

-            CONF_HANDLE_BOOL(opt_limit_usize_gap,
-                "limit_usize_gap");
+            /*
+             * Disable large size classes is now the default
+             * behavior in jemalloc. Although it is configurable
+             * in MALLOC_CONF, this is mainly for debugging
+             * purposes and should not be tuned.
+             */
+            CONF_HANDLE_BOOL(opt_disable_large_size_classes,
+                "disable_large_size_classes");

             CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
 #undef CONF_ERROR
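Because the option is parsed from MALLOC_CONF like any other conf key, a debugging setup can flip it back without rebuilding jemalloc, for example through the application-defined malloc_conf string. A minimal sketch, assuming an unprefixed jemalloc build that includes this commit (and noting, per the comment above, that tuning the option is discouraged):

#include <stdlib.h>

/*
 * jemalloc consults this global at startup, in addition to the MALLOC_CONF
 * environment variable. Setting the key added by this commit to false
 * restores the old large-size-class behavior, for debugging only.
 */
const char *malloc_conf = "disable_large_size_classes:false";

int
main(void) {
    void *p = malloc(1100 * 1024);   /* a "large" allocation */
    free(p);
    return 0;
}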
@@ -2406,7 +2417,7 @@ aligned_usize_get(size_t size, size_t alignment, size_t *usize, szind_t *ind,
         if (unlikely(*ind >= SC_NSIZES)) {
             return true;
         }
-        *usize = sz_limit_usize_gap_enabled()? sz_s2u(size):
+        *usize = sz_large_size_classes_disabled()? sz_s2u(size):
             sz_index2size(*ind);
         assert(*usize > 0 && *usize <= SC_LARGE_MAXCLASS);
         return false;
src/pac.c (33 changed lines)
@@ -143,25 +143,26 @@ pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
     }

     /*
-     * We batched allocate a larger extent when limit_usize_gap is enabled
+     * We batched allocate a larger extent with large size classes disabled
     * because the reuse of extents in the dirty pool is worse without size
-     * classes for large allocs. For instance, when limit_usize_gap is not
-     * enabled, 1.1MB, 1.15MB, and 1.2MB allocs will all be ceiled to
-     * 1.25MB and can reuse the same buffer if they are alloc & dalloc
-     * sequentially. However, with limit_usize_gap enabled, they cannot
-     * reuse the same buffer and their sequential allocs & dallocs will
-     * result in three different extents. Thus, we cache extra mergeable
-     * extents in the dirty pool to improve the reuse. We skip this
-     * optimization if both maps_coalesce and opt_retain are disabled
-     * because VM is not cheap enough to be used aggressively and extents
-     * cannot be merged at will (only extents from the same VirtualAlloc
-     * can be merged). Note that it could still be risky to cache more
-     * extents when either mpas_coalesce or opt_retain is enabled. Yet
-     * doing so is still beneficial in improving the reuse of extents
-     * with some limits. This choice should be reevaluated if
+     * classes for large allocs. For instance, when
+     * disable_large_size_classes is false, 1.1MB, 1.15MB, and 1.2MB allocs
+     * will all be ceiled to 1.25MB and can reuse the same buffer if they
+     * are alloc & dalloc sequentially. However, with
+     * disable_large_size_classes being true, they cannot reuse the same
+     * buffer and their sequential allocs & dallocs will result in three
+     * different extents. Thus, we cache extra mergeable extents in the
+     * dirty pool to improve the reuse. We skip this optimization if both
+     * maps_coalesce and opt_retain are disabled because VM is not cheap
+     * enough in such cases to be used aggressively and extents cannot be
+     * merged at will (only extents from the same VirtualAlloc can be
+     * merged). Note that it could still be risky to cache more extents
+     * when either mpas_coalesce or opt_retain is enabled. Yet doing
+     * so is still beneficial in improving the reuse of extents with some
+     * limits. This choice should be reevaluated if
     * pac_alloc_retained_batched_size is changed to be more aggressive.
     */
-    if (sz_limit_usize_gap_enabled() && edata == NULL &&
+    if (sz_large_size_classes_disabled() && edata == NULL &&
         (maps_coalesce || opt_retain)) {
         size_t batched_size = pac_alloc_retained_batched_size(size);
         /*
@@ -514,7 +514,7 @@ void prof_unbias_map_init(void) {
 #ifdef JEMALLOC_PROF
     for (szind_t i = 0; i < SC_NSIZES; i++) {
         /*
-         * When limit_usize_gap is enabled, the unbiased calculation
+         * With large size classes disabled, the unbiased calculation
         * here is not as accurate as it was because usize now changes
         * in a finer grain while the unbiased_sz is still calculated
         * using the old way.
@@ -368,7 +368,7 @@ psset_pick_alloc(psset_t *psset, size_t size) {

     /* See comments in eset_first_fit for why we enumerate search below. */
     pszind_t pind_prev = sz_psz2ind(sz_psz_quantize_floor(size));
-    if (sz_limit_usize_gap_enabled() && pind_prev < min_pind) {
+    if (sz_large_size_classes_disabled() && pind_prev < min_pind) {
         ps = psset_enumerate_search(psset, pind_prev, size);
         if (ps != NULL) {
             return ps;
@@ -29,7 +29,7 @@ sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
     * USIZE_GROW_SLOW_THRESHOLD because the usize above this increases
     * by PAGE and the number of usizes is too large.
     */
-    assert(!sz_limit_usize_gap_enabled() ||
+    assert(!sz_large_size_classes_disabled() ||
         opts->max_alloc <= USIZE_GROW_SLOW_THRESHOLD);

     size_t max_alloc = PAGE_FLOOR(opts->max_alloc);
@@ -1730,7 +1730,7 @@ stats_general_print(emitter_t *emitter) {
     OPT_WRITE_CHAR_P("stats_interval_opts")
     OPT_WRITE_CHAR_P("zero_realloc")
     OPT_WRITE_SIZE_T("process_madvise_max_batch")
-    OPT_WRITE_BOOL("limit_usize_gap")
+    OPT_WRITE_BOOL("disable_large_size_classes")

     emitter_dict_end(emitter); /* Close "opt". */

@@ -43,7 +43,7 @@ for t in $@; do
         # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail).
         enable_fill=@enable_fill@ \
         enable_prof=@enable_prof@ \
-        limit_usize_gap=@limit_usize_gap@ \
+        disable_large_size_classes=@disable_large_size_classes@ \
         . @srcroot@${t}.sh && \
         export_malloc_conf && \
         $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@
@@ -411,11 +411,11 @@ TEST_BEGIN(test_decay_never) {
     size_t pdirty_prev = get_arena_pdirty(arena_ind);
     size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
     /*
-     * With limit_usize_gap enabled, some more extents
+     * With sz_large_size_classes_disabled() = true, some more extents
     * are cached in the dirty pool, making the assumption below
     * not true.
     */
-    if (!sz_limit_usize_gap_enabled()) {
+    if (!sz_large_size_classes_disabled()) {
         expect_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
     }
     expect_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
@@ -333,7 +333,7 @@ TEST_BEGIN(test_mallctl_opt) {
     TEST_MALLCTL_OPT(bool, prof_sys_thread_name, prof);
     TEST_MALLCTL_OPT(ssize_t, lg_san_uaf_align, uaf_detection);
     TEST_MALLCTL_OPT(unsigned, debug_double_free_max_scan, always);
-    TEST_MALLCTL_OPT(bool, limit_usize_gap, always);
+    TEST_MALLCTL_OPT(bool, disable_large_size_classes, always);
     TEST_MALLCTL_OPT(size_t, process_madvise_max_batch, always);

 #undef TEST_MALLCTL_OPT
@@ -26,7 +26,7 @@ TEST_BEGIN(test_size_classes) {
     size_t size_class, max_size_class;
     szind_t index, gen_index, max_index;

-    max_size_class = sz_limit_usize_gap_enabled()? SC_SMALL_MAXCLASS:
+    max_size_class = sz_large_size_classes_disabled()? SC_SMALL_MAXCLASS:
         get_max_size_class();
     max_index = sz_size2index(max_size_class);

@@ -81,7 +81,7 @@ TEST_BEGIN(test_size_classes) {
 TEST_END

 TEST_BEGIN(test_grow_slow_size_classes) {
-    test_skip_if(!sz_limit_usize_gap_enabled());
+    test_skip_if(!sz_large_size_classes_disabled());

     size_t size = SC_LARGE_MINCLASS;
     size_t target_usize = SC_LARGE_MINCLASS;
@@ -1,5 +1,3 @@
 #!/bin/sh

-if [ "x${limit_usize_gap}" = "x1" ] ; then
-  export MALLOC_CONF="limit_usize_gap:true"
-fi
+export MALLOC_CONF="disable_large_size_classes:true"