diff --git a/include/jemalloc/internal/base.h b/include/jemalloc/internal/base.h
index 86b0cf4a..c8004b25 100644
--- a/include/jemalloc/internal/base.h
+++ b/include/jemalloc/internal/base.h
@@ -6,6 +6,12 @@
 #include "jemalloc/internal/ehooks.h"
 #include "jemalloc/internal/mutex.h"
 
+/*
+ * Alignment when THP is not enabled. Set to constant 2M in case the HUGEPAGE
+ * value is unexpectedly high (which would cause VM over-reservation).
+ */
+#define BASE_BLOCK_MIN_ALIGN ((size_t)2 << 20)
+
 enum metadata_thp_mode_e {
 	metadata_thp_disabled = 0,
 	/*
@@ -26,7 +32,6 @@ typedef enum metadata_thp_mode_e metadata_thp_mode_t;
 extern metadata_thp_mode_t opt_metadata_thp;
 extern const char *const metadata_thp_mode_names[];
 
-
 /* Embedded at the beginning of every block of base-managed virtual memory. */
 typedef struct base_block_s base_block_t;
 struct base_block_s {
diff --git a/src/base.c b/src/base.c
index 1d5e8fcd..ac8598eb 100644
--- a/src/base.c
+++ b/src/base.c
@@ -42,9 +42,17 @@ base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
 	bool zero = true;
 	bool commit = true;
 
-	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
-	assert(size == HUGEPAGE_CEILING(size));
-	size_t alignment = HUGEPAGE;
+	/*
+	 * Use huge page sizes and alignment when opt_metadata_thp is enabled
+	 * or auto.
+	 */
+	size_t alignment;
+	if (opt_metadata_thp == metadata_thp_disabled) {
+		alignment = BASE_BLOCK_MIN_ALIGN;
+	} else {
+		assert(size == HUGEPAGE_CEILING(size));
+		alignment = HUGEPAGE;
+	}
 	if (ehooks_are_default(ehooks)) {
 		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
 		if (have_madvise_huge && addr) {
@@ -277,6 +285,13 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, edata_t *edata, size_t size,
 	return ret;
 }
 
+static size_t
+base_block_size_ceil(size_t block_size) {
+	return opt_metadata_thp == metadata_thp_disabled ?
+	    ALIGNMENT_CEILING(block_size, BASE_BLOCK_MIN_ALIGN) :
+	    HUGEPAGE_CEILING(block_size);
+}
+
 /*
  * Allocate a block of virtual memory that is large enough to start with a
  * base_block_t header, followed by an object of specified size and alignment.
@@ -295,14 +310,14 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
 	 * Create increasingly larger blocks in order to limit the total number
 	 * of disjoint virtual memory ranges. Choose the next size in the page
 	 * size class series (skipping size classes that are not a multiple of
-	 * HUGEPAGE), or a size large enough to satisfy the requested size and
-	 * alignment, whichever is larger.
+	 * HUGEPAGE when using metadata_thp), or a size large enough to satisfy
+	 * the requested size and alignment, whichever is larger.
 	 */
-	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
-	    + usize));
+	size_t min_block_size = base_block_size_ceil(sz_psz2u(header_size +
+	    gap_size + usize));
 	pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
 	    *pind_last + 1 : *pind_last;
-	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
+	size_t next_block_size = base_block_size_ceil(sz_pind2sz(pind_next));
 	size_t block_size = (min_block_size > next_block_size) ? min_block_size
 	    : next_block_size;
 	base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
diff --git a/src/exp_grow.c b/src/exp_grow.c
index 386471f4..955823a1 100644
--- a/src/exp_grow.c
+++ b/src/exp_grow.c
@@ -3,6 +3,12 @@
 
 void
 exp_grow_init(exp_grow_t *exp_grow) {
-	exp_grow->next = sz_psz2ind(HUGEPAGE);
+	/*
+	 * Enforce a minimum of 2M grow, which is convenient for the huge page
+	 * use cases. Avoid using HUGEPAGE as the value though, because on some
+	 * platforms it can be very large (e.g. 512M on aarch64 w/ 64K pages).
+	 */
+	const size_t min_grow = (size_t)2 << 20;
+	exp_grow->next = sz_psz2ind(min_grow);
 	exp_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
 }