mirror of
https://github.com/jemalloc/jemalloc.git
synced 2026-04-14 22:51:50 +03:00
Move bin functions from arena.c to bin.c
This is a clean-up change that gives the bin functions implemented in the arena code a prefix of bin_ and moves them into the bin code. To further decouple the bin code from the arena code, bin functions that had taken an arena_t to check arena_is_auto now take an is_auto parameter instead.
This commit is contained in:
parent
c73ab1c2ff
commit
1cc563f531
7 changed files with 326 additions and 305 deletions
|
|
@ -79,10 +79,6 @@ void arena_dalloc_promoted(
|
|||
tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
|
||||
void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
|
||||
|
||||
void arena_dalloc_bin_locked_handle_newly_empty(
|
||||
tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin);
|
||||
void arena_dalloc_bin_locked_handle_newly_nonempty(
|
||||
tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin);
|
||||
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
|
||||
void arena_ptr_array_flush(tsd_t *tsd, szind_t binind,
|
||||
cache_bin_ptr_array_t *arr, unsigned nflush, bool small,
|
||||
|
|
@ -111,8 +107,6 @@ void arena_nthreads_dec(arena_t *arena, bool internal);
|
|||
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
|
||||
bool arena_init_huge(tsdn_t *tsdn, arena_t *a0);
|
||||
arena_t *arena_choose_huge(tsd_t *tsd);
|
||||
bin_t *arena_bin_choose(
|
||||
tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned *binshard);
|
||||
size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
|
||||
void **ptrs, size_t nfill, bool zero);
|
||||
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
|
||||
|
|
|
|||
|
|
@ -609,12 +609,12 @@ arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
|
|||
|
||||
unsigned nfree = edata_nfree_get(slab);
|
||||
if (nfree == bin_info->nregs) {
|
||||
arena_dalloc_bin_locked_handle_newly_empty(
|
||||
tsdn, arena, slab, bin);
|
||||
bin_dalloc_locked_handle_newly_empty(
|
||||
tsdn, arena_is_auto(arena), slab, bin);
|
||||
return true;
|
||||
} else if (nfree == 1 && slab != bin->slabcur) {
|
||||
arena_dalloc_bin_locked_handle_newly_nonempty(
|
||||
tsdn, arena, slab, bin);
|
||||
bin_dalloc_locked_handle_newly_nonempty(
|
||||
tsdn, arena_is_auto(arena), slab, bin);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@
|
|||
#define JEMALLOC_INTERNAL_BIN_H
|
||||
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/bin_info.h"
|
||||
#include "jemalloc/internal/bin_stats.h"
|
||||
#include "jemalloc/internal/bin_types.h"
|
||||
#include "jemalloc/internal/edata.h"
|
||||
|
|
@ -61,6 +62,43 @@ void bin_prefork(tsdn_t *tsdn, bin_t *bin);
|
|||
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
|
||||
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
|
||||
|
||||
/* Slab region allocation. */
|
||||
void *bin_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info);
|
||||
void bin_slab_reg_alloc_batch(
|
||||
edata_t *slab, const bin_info_t *bin_info, unsigned cnt, void **ptrs);
|
||||
|
||||
/* Slab list management. */
|
||||
void bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab);
|
||||
void bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab);
|
||||
edata_t *bin_slabs_nonfull_tryget(bin_t *bin);
|
||||
void bin_slabs_full_insert(bool is_auto, bin_t *bin, edata_t *slab);
|
||||
void bin_slabs_full_remove(bool is_auto, bin_t *bin, edata_t *slab);
|
||||
|
||||
/* Slab association / demotion. */
|
||||
void bin_dissociate_slab(bool is_auto, edata_t *slab, bin_t *bin);
|
||||
void bin_lower_slab(tsdn_t *tsdn, bool is_auto, edata_t *slab, bin_t *bin);
|
||||
|
||||
/* Deallocation helpers (called under bin lock). */
|
||||
void bin_dalloc_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin);
|
||||
void bin_dalloc_locked_handle_newly_empty(
|
||||
tsdn_t *tsdn, bool is_auto, edata_t *slab, bin_t *bin);
|
||||
void bin_dalloc_locked_handle_newly_nonempty(
|
||||
tsdn_t *tsdn, bool is_auto, edata_t *slab, bin_t *bin);
|
||||
|
||||
/* Slabcur refill and allocation. */
|
||||
void bin_refill_slabcur_with_fresh_slab(tsdn_t *tsdn, bin_t *bin,
|
||||
szind_t binind, edata_t *fresh_slab);
|
||||
void *bin_malloc_with_fresh_slab(tsdn_t *tsdn, bin_t *bin,
|
||||
szind_t binind, edata_t *fresh_slab);
|
||||
bool bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, bool is_auto,
|
||||
bin_t *bin);
|
||||
void *bin_malloc_no_fresh_slab(tsdn_t *tsdn, bool is_auto, bin_t *bin,
|
||||
szind_t binind);
|
||||
|
||||
/* Bin selection. */
|
||||
bin_t *bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
|
||||
unsigned *binshard_p);
|
||||
|
||||
/* Stats. */
|
||||
static inline void
|
||||
bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue