Improve const correctness in the repo

This commit is contained in:
Slobodan Predolac 2026-04-30 13:27:40 -07:00 committed by Guangli Dai
parent 86f058287f
commit 7638093c73
21 changed files with 98 additions and 90 deletions

View file

@ -51,11 +51,11 @@ void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
edata_t *arena_extent_alloc_large(
tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero);
void arena_extent_dalloc_large_prep(
tsdn_t *tsdn, arena_t *arena, edata_t *edata);
tsdn_t *tsdn, arena_t *arena, const edata_t *edata);
void arena_extent_ralloc_large_shrink(
tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize);
tsdn_t *tsdn, arena_t *arena, const edata_t *edata, size_t oldusize);
void arena_extent_ralloc_large_expand(
tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize);
tsdn_t *tsdn, arena_t *arena, const edata_t *edata, size_t oldusize);
bool arena_decay_ms_set(
tsdn_t *tsdn, arena_t *arena, extent_state_t state, ssize_t decay_ms);
ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
@ -88,12 +88,12 @@ bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
dss_prec_t arena_dss_prec_get(arena_t *arena);
ehooks_t *arena_get_ehooks(arena_t *arena);
dss_prec_t arena_dss_prec_get(const arena_t *arena);
ehooks_t *arena_get_ehooks(const arena_t *arena);
extent_hooks_t *arena_set_extent_hooks(
tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
void arena_name_get(arena_t *arena, char *name);
void arena_name_get(const arena_t *arena, char *name);
void arena_name_set(arena_t *arena, const char *name);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
@ -101,7 +101,7 @@ ssize_t arena_muzzy_decay_ms_default_get(void);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
bool arena_retain_grow_limit_get_set(
tsd_t *tsd, arena_t *arena, size_t *old_limit, size_t *new_limit);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
unsigned arena_nthreads_get(const arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);

View file

@ -20,7 +20,7 @@ arena_internal_sub(arena_t *arena, size_t size) {
}
static inline size_t
arena_internal_get(arena_t *arena) {
arena_internal_get(const arena_t *arena) {
return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}

View file

@ -24,8 +24,8 @@ struct bin_dalloc_locked_info_s {
/* Find the region index of a pointer within a slab. */
JEMALLOC_ALWAYS_INLINE size_t
bin_slab_regind_impl(
div_info_t *div_info, szind_t binind, edata_t *slab, const void *ptr) {
bin_slab_regind_impl(const div_info_t *div_info, szind_t binind,
const edata_t *slab, const void *ptr) {
size_t diff, regind;
/* Freeing a pointer outside the slab can cause assertion failure. */
@ -45,8 +45,8 @@ bin_slab_regind_impl(
}
JEMALLOC_ALWAYS_INLINE size_t
bin_slab_regind(bin_dalloc_locked_info_t *info, szind_t binind,
edata_t *slab, const void *ptr) {
bin_slab_regind(const bin_dalloc_locked_info_t *info, szind_t binind,
const edata_t *slab, const void *ptr) {
size_t regind = bin_slab_regind_impl(
&info->div_info, binind, slab, ptr);
return regind;

View file

@ -181,7 +181,7 @@ void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
size_t bitmap_size(const bitmap_info_t *binfo);
static inline bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
bitmap_full(const bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
@ -200,7 +200,7 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
}
static inline bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
bitmap_get(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t g;

View file

@ -24,7 +24,7 @@ struct div_info_s {
void div_init(div_info_t *div_info, size_t divisor);
static inline size_t
div_compute(div_info_t *div_info, size_t n) {
div_compute(const div_info_t *div_info, size_t n) {
assert(n <= (uint32_t)-1);
/*
* This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine,

View file

@ -23,27 +23,27 @@ struct ecache_s {
};
static inline size_t
ecache_npages_get(ecache_t *ecache) {
ecache_npages_get(const ecache_t *ecache) {
return eset_npages_get(&ecache->eset)
+ eset_npages_get(&ecache->guarded_eset);
}
/* Get the number of extents in the given page size index. */
static inline size_t
ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
ecache_nextents_get(const ecache_t *ecache, pszind_t ind) {
return eset_nextents_get(&ecache->eset, ind)
+ eset_nextents_get(&ecache->guarded_eset, ind);
}
/* Get the sum total bytes of the extents in the given page size index. */
static inline size_t
ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
ecache_nbytes_get(const ecache_t *ecache, pszind_t ind) {
return eset_nbytes_get(&ecache->eset, ind)
+ eset_nbytes_get(&ecache->guarded_eset, ind);
}
static inline unsigned
ecache_ind_get(ecache_t *ecache) {
ecache_ind_get(const ecache_t *ecache) {
return ecache->ind;
}

View file

@ -645,7 +645,7 @@ edata_prof_recent_alloc_set_dont_call_directly(
}
static inline bool
edata_is_head_get(edata_t *edata) {
edata_is_head_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK)
>> EDATA_BITS_IS_HEAD_SHIFT);
}

View file

@ -110,12 +110,12 @@ ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) {
}
static inline extent_hooks_t *
ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) {
ehooks_get_extent_hooks_ptr(const ehooks_t *ehooks) {
return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE);
}
static inline bool
ehooks_are_default(ehooks_t *ehooks) {
ehooks_are_default(const ehooks_t *ehooks) {
return ehooks_get_extent_hooks_ptr(ehooks)
== &ehooks_default_extent_hooks;
}
@ -126,7 +126,7 @@ ehooks_are_default(ehooks_t *ehooks) {
* include some checks for such cases.
*/
static inline bool
ehooks_dalloc_will_fail(ehooks_t *ehooks) {
ehooks_dalloc_will_fail(const ehooks_t *ehooks) {
if (ehooks_are_default(ehooks)) {
return opt_retain;
} else {
@ -135,17 +135,17 @@ ehooks_dalloc_will_fail(ehooks_t *ehooks) {
}
static inline bool
ehooks_split_will_fail(ehooks_t *ehooks) {
ehooks_split_will_fail(const ehooks_t *ehooks) {
return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL;
}
static inline bool
ehooks_merge_will_fail(ehooks_t *ehooks) {
ehooks_merge_will_fail(const ehooks_t *ehooks) {
return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL;
}
static inline bool
ehooks_guard_will_fail(ehooks_t *ehooks) {
ehooks_guard_will_fail(const ehooks_t *ehooks) {
/*
* Before the guard hooks are officially introduced, limit the use to
* the default hooks only.

View file

@ -60,11 +60,11 @@ struct eset_s {
void eset_init(eset_t *eset, extent_state_t state);
size_t eset_npages_get(eset_t *eset);
size_t eset_npages_get(const eset_t *eset);
/* Get the number of extents in the given page size index. */
size_t eset_nextents_get(eset_t *eset, pszind_t ind);
size_t eset_nextents_get(const eset_t *eset, pszind_t ind);
/* Get the sum total bytes of the extents in the given page size index. */
size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
size_t eset_nbytes_get(const eset_t *eset, pszind_t ind);
void eset_insert(eset_t *eset, edata_t *edata);
void eset_remove(eset_t *eset, edata_t *edata);

View file

@ -91,7 +91,7 @@ extent_neighbor_head_state_mergeable(
}
JEMALLOC_ALWAYS_INLINE bool
extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
extent_can_acquire_neighbor(const edata_t *edata, rtree_contents_t contents,
extent_pai_t pai, extent_state_t expected_state, bool forward,
bool expanding) {
edata_t *neighbor = contents.edata;

View file

@ -25,7 +25,7 @@ fb_init(fb_group_t *fb, size_t nbits) {
}
static inline bool
fb_empty(fb_group_t *fb, size_t nbits) {
fb_empty(const fb_group_t *fb, size_t nbits) {
size_t ngroups = FB_NGROUPS(nbits);
for (size_t i = 0; i < ngroups; i++) {
if (fb[i] != 0) {
@ -36,7 +36,7 @@ fb_empty(fb_group_t *fb, size_t nbits) {
}
static inline bool
fb_full(fb_group_t *fb, size_t nbits) {
fb_full(const fb_group_t *fb, size_t nbits) {
size_t ngroups = FB_NGROUPS(nbits);
size_t trailing_bits = nbits % FB_GROUP_BITS;
size_t limit = (trailing_bits == 0 ? ngroups : ngroups - 1);
@ -52,7 +52,7 @@ fb_full(fb_group_t *fb, size_t nbits) {
}
static inline bool
fb_get(fb_group_t *fb, size_t nbits, size_t bit) {
fb_get(const fb_group_t *fb, size_t nbits, size_t bit) {
assert(bit < nbits);
size_t group_ind = bit / FB_GROUP_BITS;
size_t bit_ind = bit % FB_GROUP_BITS;
@ -156,15 +156,21 @@ fb_scount_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
/* Finds the number of set bits in the range of length cnt starting at start. */
JEMALLOC_ALWAYS_INLINE size_t
fb_scount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
fb_scount(const fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
size_t scount = 0;
fb_visit_impl(fb, nbits, &fb_scount_visitor, &scount, start, cnt);
/*
* fb_visit_impl is shared with mutating visitors (e.g. fb_set_range),
* so it takes a non-const fb. fb_scount_visitor only reads, so the
* cast is safe.
*/
fb_visit_impl((fb_group_t *)fb, nbits, &fb_scount_visitor, &scount,
start, cnt);
return scount;
}
/* Finds the number of unset bits in the range of length cnt starting at start. */
JEMALLOC_ALWAYS_INLINE size_t
fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
fb_ucount(const fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
size_t scount = fb_scount(fb, nbits, start, cnt);
return cnt - scount;
}
@ -176,8 +182,8 @@ fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
* Returns the number of bits in the bitmap if no such bit exists.
*/
JEMALLOC_ALWAYS_INLINE ssize_t
fb_find_impl(
fb_group_t *fb, size_t nbits, size_t start, bool val, bool forward) {
fb_find_impl(const fb_group_t *fb, size_t nbits, size_t start, bool val,
bool forward) {
assert(start < nbits);
size_t ngroups = FB_NGROUPS(nbits);
ssize_t group_ind = start / FB_GROUP_BITS;
@ -226,14 +232,14 @@ fb_find_impl(
* number of bits in the bitmap if no such bit exists.
*/
static inline size_t
fb_ffu(fb_group_t *fb, size_t nbits, size_t min_bit) {
fb_ffu(const fb_group_t *fb, size_t nbits, size_t min_bit) {
return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ false,
/* forward */ true);
}
/* The same, but looks for an unset bit. */
static inline size_t
fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) {
fb_ffs(const fb_group_t *fb, size_t nbits, size_t min_bit) {
return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ true,
/* forward */ true);
}
@ -243,21 +249,21 @@ fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) {
* no such bit exists.
*/
static inline ssize_t
fb_flu(fb_group_t *fb, size_t nbits, size_t max_bit) {
fb_flu(const fb_group_t *fb, size_t nbits, size_t max_bit) {
return fb_find_impl(fb, nbits, max_bit, /* val */ false,
/* forward */ false);
}
static inline ssize_t
fb_fls(fb_group_t *fb, size_t nbits, size_t max_bit) {
fb_fls(const fb_group_t *fb, size_t nbits, size_t max_bit) {
return fb_find_impl(fb, nbits, max_bit, /* val */ true,
/* forward */ false);
}
/* Returns whether or not we found a range. */
JEMALLOC_ALWAYS_INLINE bool
fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
size_t *r_len, bool val, bool forward) {
fb_iter_range_impl(const fb_group_t *fb, size_t nbits, size_t start,
size_t *r_begin, size_t *r_len, bool val, bool forward) {
assert(start < nbits);
ssize_t next_range_begin = fb_find_impl(fb, nbits, start, val, forward);
if ((forward && next_range_begin == (ssize_t)nbits)
@ -286,8 +292,8 @@ fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
* touching *r_begin or *r_end).
*/
static inline bool
fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
size_t *r_len) {
fb_srange_iter(const fb_group_t *fb, size_t nbits, size_t start,
size_t *r_begin, size_t *r_len) {
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
/* val */ true, /* forward */ true);
}
@ -297,30 +303,30 @@ fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
* forwards. (The position returned is still the earliest bit in the range).
*/
static inline bool
fb_srange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
size_t *r_len) {
fb_srange_riter(const fb_group_t *fb, size_t nbits, size_t start,
size_t *r_begin, size_t *r_len) {
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
/* val */ true, /* forward */ false);
}
/* Similar to fb_srange_iter, but searches for unset bits. */
static inline bool
fb_urange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
size_t *r_len) {
fb_urange_iter(const fb_group_t *fb, size_t nbits, size_t start,
size_t *r_begin, size_t *r_len) {
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
/* val */ false, /* forward */ true);
}
/* Similar to fb_srange_riter, but searches for unset bits. */
static inline bool
fb_urange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
size_t *r_len) {
fb_urange_riter(const fb_group_t *fb, size_t nbits, size_t start,
size_t *r_begin, size_t *r_len) {
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
/* val */ false, /* forward */ false);
}
JEMALLOC_ALWAYS_INLINE size_t
fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) {
fb_range_longest_impl(const fb_group_t *fb, size_t nbits, bool val) {
size_t begin = 0;
size_t longest_len = 0;
size_t len = 0;
@ -336,12 +342,12 @@ fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) {
}
static inline size_t
fb_srange_longest(fb_group_t *fb, size_t nbits) {
fb_srange_longest(const fb_group_t *fb, size_t nbits) {
return fb_range_longest_impl(fb, nbits, /* val */ true);
}
static inline size_t
fb_urange_longest(fb_group_t *fb, size_t nbits) {
fb_urange_longest(const fb_group_t *fb, size_t nbits) {
return fb_range_longest_impl(fb, nbits, /* val */ false);
}

View file

@ -209,7 +209,7 @@ hpdata_allow_hugify(hpdata_t *hpdata, nstime_t now) {
}
static inline nstime_t
hpdata_time_hugify_allowed(hpdata_t *hpdata) {
hpdata_time_hugify_allowed(const hpdata_t *hpdata) {
return hpdata->h_time_hugify_allowed;
}
@ -305,7 +305,7 @@ hpdata_ndirty_get(const hpdata_t *hpdata) {
}
static inline size_t
hpdata_nretained_get(hpdata_t *hpdata) {
hpdata_nretained_get(const hpdata_t *hpdata) {
return HUGEPAGE_PAGES - hpdata->h_ntouched;
}
@ -330,7 +330,7 @@ hpdata_purged_when_empty_and_huge_set(hpdata_t *hpdata, bool v) {
}
static inline void
hpdata_assert_empty(hpdata_t *hpdata) {
hpdata_assert_empty(const hpdata_t *hpdata) {
assert(fb_empty(hpdata->active_pages, HUGEPAGE_PAGES));
assert(hpdata->h_nactive == 0);
}
@ -341,7 +341,7 @@ hpdata_assert_empty(hpdata_t *hpdata) {
* match computed ones).
*/
static inline bool
hpdata_consistent(hpdata_t *hpdata) {
hpdata_consistent(const hpdata_t *hpdata) {
bool res = true;
const size_t active_urange_longest = fb_urange_longest(

View file

@ -98,7 +98,7 @@ arena_ichoose(tsd_t *tsd, arena_t *arena) {
}
static inline bool
arena_is_auto(arena_t *arena) {
arena_is_auto(const arena_t *arena) {
assert(narenas_auto > 0);
return (arena_ind_get(arena) < manual_arena_base);

View file

@ -120,7 +120,7 @@ pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
}
static inline ehooks_t *
pa_shard_ehooks_get(pa_shard_t *shard) {
pa_shard_ehooks_get(const pa_shard_t *shard) {
return base_ehooks_get(shard->base);
}
@ -221,12 +221,12 @@ void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);
size_t pa_shard_nactive(pa_shard_t *shard);
size_t pa_shard_ndirty(pa_shard_t *shard);
size_t pa_shard_nmuzzy(pa_shard_t *shard);
size_t pa_shard_nactive(const pa_shard_t *shard);
size_t pa_shard_ndirty(const pa_shard_t *shard);
size_t pa_shard_nmuzzy(const pa_shard_t *shard);
void pa_shard_basic_stats_merge(
pa_shard_t *shard, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
const pa_shard_t *shard, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,

View file

@ -156,12 +156,12 @@ bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
malloc_mutex_t *stats_mtx);
static inline size_t
pac_mapped(pac_t *pac) {
pac_mapped(const pac_t *pac) {
return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
}
static inline ehooks_t *
pac_ehooks_get(pac_t *pac) {
pac_ehooks_get(const pac_t *pac) {
return base_ehooks_get(pac->base);
}

View file

@ -134,17 +134,17 @@ void psset_insert(psset_t *psset, hpdata_t *ps);
void psset_remove(psset_t *psset, hpdata_t *ps);
static inline size_t
psset_npageslabs(psset_t *psset) {
psset_npageslabs(const psset_t *psset) {
return psset->stats.merged.npageslabs;
}
static inline size_t
psset_nactive(psset_t *psset) {
psset_nactive(const psset_t *psset) {
return psset->stats.merged.nactive;
}
static inline size_t
psset_ndirty(psset_t *psset) {
psset_ndirty(const psset_t *psset) {
return psset->stats.merged.ndirty;
}

View file

@ -19,7 +19,7 @@ tcache_enabled_get(tsd_t *tsd) {
}
static inline unsigned
tcache_nbins_get(tcache_slow_t *tcache_slow) {
tcache_nbins_get(const tcache_slow_t *tcache_slow) {
assert(tcache_slow != NULL);
unsigned nbins = tcache_slow->tcache_nbins;
assert(nbins <= TCACHE_NBINS_MAX);
@ -27,7 +27,7 @@ tcache_nbins_get(tcache_slow_t *tcache_slow) {
}
static inline size_t
tcache_max_get(tcache_slow_t *tcache_slow) {
tcache_max_get(const tcache_slow_t *tcache_slow) {
assert(tcache_slow != NULL);
size_t tcache_max = sz_index2size(tcache_nbins_get(tcache_slow) - 1);
assert(tcache_max <= TCACHE_MAXCLASS_LIMIT);

View file

@ -343,7 +343,8 @@ arena_extent_alloc_large(
}
void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
const edata_t *edata) {
if (config_stats) {
arena_large_dalloc_stats_update(
tsdn, arena, edata_usize_get(edata));
@ -352,7 +353,7 @@ arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
void
arena_extent_ralloc_large_shrink(
tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize) {
tsdn_t *tsdn, arena_t *arena, const edata_t *edata, size_t oldusize) {
size_t usize = edata_usize_get(edata);
if (config_stats) {
@ -362,7 +363,7 @@ arena_extent_ralloc_large_shrink(
void
arena_extent_ralloc_large_expand(
tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize) {
tsdn_t *tsdn, arena_t *arena, const edata_t *edata, size_t oldusize) {
size_t usize = edata_usize_get(edata);
if (config_stats) {
@ -1662,7 +1663,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
}
ehooks_t *
arena_get_ehooks(arena_t *arena) {
arena_get_ehooks(const arena_t *arena) {
return base_ehooks_get(arena->base);
}
@ -1685,7 +1686,7 @@ arena_set_extent_hooks(
}
dss_prec_t
arena_dss_prec_get(arena_t *arena) {
arena_dss_prec_get(const arena_t *arena) {
return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}
@ -1699,8 +1700,9 @@ arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
}
void
arena_name_get(arena_t *arena, char *name) {
char *end = (char *)memchr((void *)arena->name, '\0', ARENA_NAME_LEN);
arena_name_get(const arena_t *arena, char *name) {
const char *end = (const char *)memchr(
arena->name, '\0', ARENA_NAME_LEN);
assert(end != NULL);
size_t len = (uintptr_t)end - (uintptr_t)arena->name + 1;
assert(len > 0 && len <= ARENA_NAME_LEN);
@ -1751,7 +1753,7 @@ arena_retain_grow_limit_get_set(
}
unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
arena_nthreads_get(const arena_t *arena, bool internal) {
return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

View file

@ -112,7 +112,7 @@ label_done:
}
static inline bool
base_edata_is_reused(edata_t *edata) {
base_edata_is_reused(const edata_t *edata) {
/*
* Borrow the guarded bit to indicate if the extent is a recycled one,
* i.e. the ones returned to base for reuse; currently only tcache bin
@ -133,8 +133,8 @@ base_edata_init(
}
static size_t
base_get_num_blocks(base_t *base, bool with_new_block) {
base_block_t *b = base->blocks;
base_get_num_blocks(const base_t *base, bool with_new_block) {
const base_block_t *b = base->blocks;
assert(b != NULL);
size_t n_blocks = with_new_block ? 2 : 1;

View file

@ -32,17 +32,17 @@ eset_init(eset_t *eset, extent_state_t state) {
}
size_t
eset_npages_get(eset_t *eset) {
eset_npages_get(const eset_t *eset) {
return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
}
size_t
eset_nextents_get(eset_t *eset, pszind_t pind) {
eset_nextents_get(const eset_t *eset, pszind_t pind) {
return atomic_load_zu(&eset->bin_stats[pind].nextents, ATOMIC_RELAXED);
}
size_t
eset_nbytes_get(eset_t *eset, pszind_t pind) {
eset_nbytes_get(const eset_t *eset, pszind_t pind) {
return atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
}

View file

@ -73,12 +73,12 @@ pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
}
size_t
pa_shard_nactive(pa_shard_t *shard) {
pa_shard_nactive(const pa_shard_t *shard) {
return atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
}
size_t
pa_shard_ndirty(pa_shard_t *shard) {
pa_shard_ndirty(const pa_shard_t *shard) {
size_t ndirty = ecache_npages_get(&shard->pac.ecache_dirty);
if (shard->ever_used_hpa) {
ndirty += psset_ndirty(&shard->hpa_shard.psset);
@ -87,13 +87,13 @@ pa_shard_ndirty(pa_shard_t *shard) {
}
size_t
pa_shard_nmuzzy(pa_shard_t *shard) {
pa_shard_nmuzzy(const pa_shard_t *shard) {
return ecache_npages_get(&shard->pac.ecache_muzzy);
}
void
pa_shard_basic_stats_merge(
pa_shard_t *shard, size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
const pa_shard_t *shard, size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
*nactive += pa_shard_nactive(shard);
*ndirty += pa_shard_ndirty(shard);
*nmuzzy += pa_shard_nmuzzy(shard);