diff --git a/include/jemalloc/internal/pai.h b/include/jemalloc/internal/pai.h
index f8f7d667..d978cd7d 100644
--- a/include/jemalloc/internal/pai.h
+++ b/include/jemalloc/internal/pai.h
@@ -7,7 +7,7 @@ typedef struct pai_s pai_t;
 struct pai_s {
 	/* Returns NULL on failure. */
 	edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
-	    size_t alignment, bool zero, bool guarded,
+	    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
 	    bool *deferred_work_generated);
 	/*
 	 * Returns the number of extents added to the list (which may be fewer
@@ -37,10 +37,11 @@ struct pai_s {
  */
 
 static inline edata_t *
-pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool guarded, bool *deferred_work_generated) {
+pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
+    bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated) {
 	return self->alloc(tsdn, self, size, alignment, zero, guarded,
-	    deferred_work_generated);
+	    frequent_reuse, deferred_work_generated);
 }
 
 static inline size_t
diff --git a/src/hpa.c b/src/hpa.c
index caf122b7..0a7ec19e 100644
--- a/src/hpa.c
+++ b/src/hpa.c
@@ -9,7 +9,8 @@
 #define HPA_EDEN_SIZE (128 * HUGEPAGE)
 
 static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
 static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
     size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
 static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -760,7 +761,7 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
 
 static edata_t *
 hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool guarded, bool *deferred_work_generated) {
+    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
 	assert((size & PAGE_MASK) == 0);
 	assert(!guarded);
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
diff --git a/src/pa.c b/src/pa.c
index 9004cc90..0f95e93a 100644
--- a/src/pa.c
+++ b/src/pa.c
@@ -128,7 +128,7 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	edata_t *edata = NULL;
 	if (!guarded && pa_shard_uses_hpa(shard)) {
 		edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
-		    zero, /* guarded */ false, deferred_work_generated);
+		    zero, /* guarded */ false, slab, deferred_work_generated);
 	}
 	/*
 	 * Fall back to the PAC if the HPA is off or couldn't serve the given
@@ -136,7 +136,7 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	 */
 	if (edata == NULL) {
 		edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
-		    guarded, deferred_work_generated);
+		    guarded, slab, deferred_work_generated);
 	}
 	if (edata != NULL) {
 		assert(edata_size_get(edata) == size);
diff --git a/src/pac.c b/src/pac.c
index 914cec90..e1f60025 100644
--- a/src/pac.c
+++ b/src/pac.c
@@ -5,7 +5,8 @@
 #include "jemalloc/internal/san.h"
 
 static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
 static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
 static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -152,7 +153,8 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
 
 static edata_t *
 pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
-    bool zero, bool guarded, bool *deferred_work_generated) {
+    bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated) {
 	pac_t *pac = (pac_t *)self;
 	ehooks_t *ehooks = pac_ehooks_get(pac);
 
diff --git a/src/pai.c b/src/pai.c
index 86b8ee5b..45c87729 100644
--- a/src/pai.c
+++ b/src/pai.c
@@ -7,7 +7,8 @@ pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
 	for (size_t i = 0; i < nallocs; i++) {
 		bool deferred_by_alloc = false;
 		edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
-		    /* zero */ false, /* guarded */ false, &deferred_by_alloc);
+		    /* zero */ false, /* guarded */ false,
+		    /* frequent_reuse */ false, &deferred_by_alloc);
 		*deferred_work_generated |= deferred_by_alloc;
 		if (edata == NULL) {
 			return i;
diff --git a/src/sec.c b/src/sec.c
index d99c4439..0c4e7032 100644
--- a/src/sec.c
+++ b/src/sec.c
@@ -4,7 +4,8 @@
 #include "jemalloc/internal/sec.h"
 
 static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
 static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
 static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -218,7 +219,7 @@ sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
 
 static edata_t *
 sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool guarded, bool *deferred_work_generated) {
+    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
 	assert((size & PAGE_MASK) == 0);
 	assert(!guarded);
 
@@ -227,7 +228,8 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
 	if (zero || alignment > PAGE || sec->opts.nshards == 0
 	    || size > sec->opts.max_alloc) {
 		return pai_alloc(tsdn, sec->fallback, size, alignment, zero,
-		    /* guarded */ false, deferred_work_generated);
+		    /* guarded */ false, frequent_reuse,
+		    deferred_work_generated);
 	}
 	pszind_t pszind = sz_psz2ind(size);
 	sec_shard_t *shard = sec_shard_pick(tsdn, sec);
@@ -250,7 +252,8 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
 			    size);
 		} else {
 			edata = pai_alloc(tsdn, sec->fallback, size, alignment,
-			    zero, /* guarded */ false, deferred_work_generated);
+			    zero, /* guarded */ false, frequent_reuse,
+			    deferred_work_generated);
 		}
 	}
 	return edata;
diff --git a/test/unit/hpa.c b/test/unit/hpa.c
index a63d51d4..25ee1950 100644
--- a/test/unit/hpa.c
+++ b/test/unit/hpa.c
@@ -81,10 +81,10 @@ TEST_BEGIN(test_alloc_max) {
 	/* Small max */
 	bool deferred_work_generated = false;
 	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
-	    &deferred_work_generated);
+	    false, &deferred_work_generated);
 	expect_ptr_not_null(edata, "Allocation of small max failed");
 	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
-	    false, &deferred_work_generated);
+	    false, false, &deferred_work_generated);
 	expect_ptr_null(edata, "Allocation of larger than small max succeeded");
 
 	destroy_test_data(shard);
@@ -188,7 +188,7 @@ TEST_BEGIN(test_stress) {
 			size_t npages = npages_min + prng_range_zu(&prng_state,
 			    npages_max - npages_min);
 			edata_t *edata = pai_alloc(tsdn, &shard->pai,
-			    npages * PAGE, PAGE, false, false,
+			    npages * PAGE, PAGE, false, false, false,
 			    &deferred_work_generated);
 			assert_ptr_not_null(edata,
 			    "Unexpected allocation failure");
@@ -264,7 +264,7 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
 	for (size_t i = 0; i < NALLOCS / 2; i++) {
 		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
 		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* frequent_reuse */ false, &deferred_work_generated);
 		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
 	}
 	edata_list_active_t allocs_list;
@@ -300,8 +300,8 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
 	/* Reallocate (individually), and ensure reuse and contiguity. */
 	for (size_t i = 0; i < NALLOCS; i++) {
 		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
 	}
 	void *new_base = edata_base_get(allocs[0]);
@@ -376,7 +376,7 @@ TEST_BEGIN(test_defer_time) {
 	edata_t *edatas[HUGEPAGE_PAGES];
 	for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
 		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
-		    false, &deferred_work_generated);
+		    false, false, &deferred_work_generated);
 		expect_ptr_not_null(edatas[i], "Unexpected null edata");
 	}
 	hpa_shard_do_deferred_work(tsdn, shard);
@@ -410,7 +410,7 @@ TEST_BEGIN(test_defer_time) {
 	 */
 	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
 		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
-		    false, &deferred_work_generated);
+		    false, false, &deferred_work_generated);
 		expect_ptr_not_null(edatas[i], "Unexpected null edata");
 	}
 	/*
diff --git a/test/unit/sec.c b/test/unit/sec.c
index 8ac3411c..e98bdc92 100644
--- a/test/unit/sec.c
+++ b/test/unit/sec.c
@@ -50,7 +50,7 @@ test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,
 
 static inline edata_t *
 pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded,
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
     bool *deferred_work_generated) {
 	assert(!guarded);
 	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
@@ -178,12 +178,12 @@ TEST_BEGIN(test_reuse) {
 	    /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
 	for (int i = 0; i < NALLOCS; i++) {
 		one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
 		two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
 	}
 	expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
@@ -214,11 +214,11 @@ TEST_BEGIN(test_reuse) {
 	 */
 	for (int i = 0; i < NALLOCS; i++) {
 		edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_eq(one_page[i], alloc1,
 		    "Got unexpected allocation");
 		expect_ptr_eq(two_page[i], alloc2,
@@ -255,12 +255,13 @@ TEST_BEGIN(test_auto_flush) {
 	    /* max_bytes */ NALLOCS * PAGE);
 	for (int i = 0; i < NALLOCS; i++) {
 		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
 	}
 	extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
-	    /* guarded */ false, &deferred_work_generated);
+	    /* guarded */ false, /* frequent_reuse */ false,
+	    &deferred_work_generated);
 	expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
 	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
 	expect_zu_le(NALLOCS + 1, max_allocs,
@@ -311,8 +312,8 @@ do_disable_flush_test(bool is_disable) {
 	    /* max_bytes */ NALLOCS * PAGE);
 	for (int i = 0; i < NALLOCS; i++) {
 		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
 	}
 	/* Free all but the last aloc. */
@@ -386,7 +387,7 @@ TEST_BEGIN(test_max_alloc_respected) {
 		    "Incorrect number of deallocations");
 		edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
 		    PAGE, /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* frequent_reuse */ false, &deferred_work_generated);
 		expect_ptr_not_null(edata, "Unexpected alloc failure");
 		expect_zu_eq(i + 1, ta.alloc_count,
 		    "Incorrect number of allocations");
@@ -413,7 +414,7 @@ TEST_BEGIN(test_expand_shrink_delegate) {
 	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
 	    /* max_bytes */ 1000 * PAGE);
 	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-	    /* zero */ false, /* guarded */ false,
+	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
 	    &deferred_work_generated);
 	expect_ptr_not_null(edata, "Unexpected alloc failure");
 
@@ -454,7 +455,7 @@ TEST_BEGIN(test_nshards_0) {
 
 	bool deferred_work_generated = false;
 	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-	    /* zero */ false, /* guarded */ false,
+	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
 	    &deferred_work_generated);
 	pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
 
@@ -497,8 +498,8 @@ TEST_BEGIN(test_stats_simple) {
 	edata_t *allocs[FLUSH_PAGES];
 	for (size_t i = 0; i < FLUSH_PAGES; i++) {
 		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_stats_pages(tsdn, &sec, 0);
 	}
 
@@ -512,6 +513,7 @@
 	for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
 		allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
 		    /* zero */ false, /* guarded */ false,
+		    /* frequent_reuse */ false,
 		    &deferred_work_generated);
 		expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
 	}
@@ -541,14 +543,16 @@ TEST_BEGIN(test_stats_auto_flush) {
 
 	bool deferred_work_generated = false;
 	extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
-	    /* guarded */ false, &deferred_work_generated);
+	    /* guarded */ false, /* frequent_reuse */ false,
+	    &deferred_work_generated);
 	extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
-	    /* guarded */ false, &deferred_work_generated);
+	    /* guarded */ false, /* frequent_reuse */ false,
+	    &deferred_work_generated);
 
 	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
 		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 	}
 
 	for (size_t i = 0; i < FLUSH_PAGES; i++) {
@@ -588,8 +592,8 @@ TEST_BEGIN(test_stats_manual_flush) {
 	edata_t *allocs[FLUSH_PAGES];
 	for (size_t i = 0; i < FLUSH_PAGES; i++) {
 		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
 	}
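
For reference, a minimal sketch of the updated call shape after this change. It is illustrative only, not code from the patch: tsdn, pai, size, and slab are placeholder variables a caller is assumed to already have in hand, and passing the slab-ness of the request as the hint mirrors what pa_alloc() does in src/pa.c above.

/*
 * Illustrative sketch (not part of the patch): the extended pai_alloc()
 * interface places the new frequent_reuse hint between the guarded flag
 * and the deferred-work out-parameter.
 */
bool deferred_work_generated = false;
edata_t *edata = pai_alloc(tsdn, pai, size, /* alignment */ PAGE,
    /* zero */ false, /* guarded */ false, /* frequent_reuse */ slab,
    &deferred_work_generated);
if (edata == NULL) {
	/* Allocation failed; the caller falls back or reports failure. */
}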