Remove hpa_sec_batch_fill_extra and calculate nallocs dynamically.

This commit is contained in:
Tony Printezis 2026-03-02 11:11:09 -08:00
parent 6515df8cec
commit c0e2a93e4d
14 changed files with 268 additions and 98 deletions

View file

@ -432,6 +432,27 @@ void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age, bool is_huge);
void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz);
void hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz);
typedef struct hpdata_alloc_offset_s hpdata_alloc_offset_t;
/*
 * A candidate allocation slot within a hugepage, as discovered by
 * hpdata_find_alloc_offsets(): nothing is reserved until the offset is
 * passed to hpdata_reserve_alloc_offset().
 */
struct hpdata_alloc_offset_s {
/* Starting position of the slot — presumably a page index within the hugepage; TODO confirm against hpdata.c. */
size_t index;
/* Length of the slot — presumably in pages; TODO confirm against hpdata.c. */
size_t len;
};
/*
* Given an hpdata which can serve an allocation request of size sz,
* find between one and max_nallocs offsets that can satisfy such
* an allocation request and buffer them in offsets (without actually
* reserving any space or updating hpdata). Return the number
* of offsets discovered.
*/
size_t hpdata_find_alloc_offsets(hpdata_t *hpdata, size_t sz,
hpdata_alloc_offset_t *offsets, size_t max_nallocs);
/* Reserve the allocation for the given offset. */
void *hpdata_reserve_alloc_offset(
hpdata_t *hpdata, size_t sz, hpdata_alloc_offset_t *offset);
void hpdata_post_reserve_alloc_offsets(
hpdata_t *hpdata, hpdata_alloc_offset_t *offsets, size_t nallocs);
/*
* The hpdata_purge_prepare_t allows grabbing the metadata required to purge
* subranges of a hugepage while holding a lock, drop the lock during the actual

View file

@ -96,6 +96,19 @@ sec_size_supported(sec_t *sec, size_t size) {
return sec_is_used(sec) && size <= sec->opts.max_alloc;
}
/* Max number of extents we would allocate out of a single huge page. */
#define MAX_SEC_NALLOCS 4
/* Fill the SEC up to max_bytes / MAX_BYTES_DIV */
#define MAX_BYTES_DIV 4
/*
 * Calculate the number of extents we will try to allocate out of
* a single huge page for a given allocation size. The result will be
* in the range [1, MAX_SEC_NALLOCS].
*/
size_t sec_calc_nallocs_for_size(sec_t *sec, size_t size);
/* If the sec does not have an extent available, it will return NULL. */
edata_t *sec_alloc(tsdn_t *tsdn, sec_t *sec, size_t size);
void sec_fill(tsdn_t *tsdn, sec_t *sec, size_t size,

View file

@ -27,16 +27,9 @@ struct sec_opts_s {
* until we are 1/4 below max_bytes.
*/
size_t max_bytes;
/*
* When we can't satisfy an allocation out of the SEC because there are
* no available ones cached, allocator will allocate a batch with extra
* batch_fill_extra extents of the same size.
*/
size_t batch_fill_extra;
};
#define SEC_OPTS_NSHARDS_DEFAULT 2
#define SEC_OPTS_BATCH_FILL_EXTRA_DEFAULT 3
#define SEC_OPTS_MAX_ALLOC_DEFAULT ((32 * 1024) < PAGE ? PAGE : (32 * 1024))
#define SEC_OPTS_MAX_BYTES_DEFAULT \
((256 * 1024) < (4 * SEC_OPTS_MAX_ALLOC_DEFAULT) \
@ -45,6 +38,6 @@ struct sec_opts_s {
#define SEC_OPTS_DEFAULT \
{SEC_OPTS_NSHARDS_DEFAULT, SEC_OPTS_MAX_ALLOC_DEFAULT, \
SEC_OPTS_MAX_BYTES_DEFAULT, SEC_OPTS_BATCH_FILL_EXTRA_DEFAULT}
SEC_OPTS_MAX_BYTES_DEFAULT}
#endif /* JEMALLOC_INTERNAL_SEC_OPTS_H */

View file

@ -6,6 +6,13 @@
#define UTIL_INLINE static inline
#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
/* Junk fill patterns. */
#ifndef JEMALLOC_ALLOC_JUNK
# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
@ -153,4 +160,5 @@ util_prefetch_write_range(void *ptr, size_t sz) {
*/
bool multi_setting_parse_next(const char **setting_segment_cur,
size_t *len_left, size_t *key_start, size_t *key_end, size_t *value);
#endif /* JEMALLOC_INTERNAL_UTIL_H */